xref: /freebsd/sys/netinet/if_ether.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1988, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)if_ether.c	8.1 (Berkeley) 6/10/93
32  */
33 
34 /*
35  * Ethernet address resolution protocol.
36  * TODO:
37  *	add "inuse/lock" bit (or ref. count) along with valid bit
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 
43 #include <sys/param.h>
44 #include <sys/eventhandler.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/queue.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 #include <sys/mbuf.h>
51 #include <sys/malloc.h>
52 #include <sys/proc.h>
53 #include <sys/socket.h>
54 #include <sys/syslog.h>
55 
56 #include <net/if.h>
57 #include <net/if_var.h>
58 #include <net/if_dl.h>
59 #include <net/if_private.h>
60 #include <net/if_types.h>
61 #include <net/netisr.h>
62 #include <net/ethernet.h>
63 #include <net/route.h>
64 #include <net/route/nhop.h>
65 #include <net/vnet.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/in_fib.h>
69 #include <netinet/in_var.h>
70 #include <net/if_llatbl.h>
71 #include <netinet/if_ether.h>
72 #ifdef INET
73 #include <netinet/ip_carp.h>
74 #endif
75 
76 #include <security/mac/mac_framework.h>
77 
78 #define SIN(s) ((const struct sockaddr_in *)(s))
79 
80 static struct timeval arp_lastlog;
81 static int arp_curpps;
82 static int arp_maxpps = 1;
83 
84 /* Simple ARP state machine */
85 enum arp_llinfo_state {
86 	ARP_LLINFO_INCOMPLETE = 0, /* No LLE data */
87 	ARP_LLINFO_REACHABLE,	/* LLE is valid */
88 	ARP_LLINFO_VERIFY,	/* LLE is valid, need refresh */
89 	ARP_LLINFO_DELETED,	/* LLE is deleted */
90 };
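/*
 * State transitions, as implemented below: arp_mark_lle_reachable() moves an
 * entry to ARP_LLINFO_REACHABLE; arptimer() demotes it to ARP_LLINFO_VERIFY
 * shortly before expiry and, from there, either re-ARPs for the address,
 * reschedules itself, or lets the entry expire.
 */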
91 
92 SYSCTL_DECL(_net_link_ether);
93 static SYSCTL_NODE(_net_link_ether, PF_INET, inet,
94     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
95     "");
96 static SYSCTL_NODE(_net_link_ether, PF_ARP, arp,
97     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
98     "");
99 
100 /* timer values */
101 VNET_DEFINE_STATIC(int, arpt_keep) = (20*60);	/* once resolved, good for 20
102 						 * minutes */
103 VNET_DEFINE_STATIC(int, arp_maxtries) = 5;
104 VNET_DEFINE_STATIC(int, arp_proxyall) = 0;
105 VNET_DEFINE_STATIC(int, arpt_down) = 20;	/* keep incomplete entries for
106 						 * 20 seconds */
107 VNET_DEFINE_STATIC(int, arpt_rexmit) = 1;	/* retransmit arp entries, sec */
108 VNET_PCPUSTAT_DEFINE(struct arpstat, arpstat);  /* ARP statistics, see if_arp.h */
109 VNET_PCPUSTAT_SYSINIT(arpstat);
110 
111 #ifdef VIMAGE
112 VNET_PCPUSTAT_SYSUNINIT(arpstat);
113 #endif /* VIMAGE */
114 
115 VNET_DEFINE_STATIC(int, arp_maxhold) = 16;
116 
117 #define	V_arpt_keep		VNET(arpt_keep)
118 #define	V_arpt_down		VNET(arpt_down)
119 #define	V_arpt_rexmit		VNET(arpt_rexmit)
120 #define	V_arp_maxtries		VNET(arp_maxtries)
121 #define	V_arp_proxyall		VNET(arp_proxyall)
122 #define	V_arp_maxhold		VNET(arp_maxhold)
123 
124 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age, CTLFLAG_VNET | CTLFLAG_RW,
125 	&VNET_NAME(arpt_keep), 0,
126 	"ARP entry lifetime in seconds");
127 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries, CTLFLAG_VNET | CTLFLAG_RW,
128 	&VNET_NAME(arp_maxtries), 0,
129 	"ARP resolution attempts before returning error");
130 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall, CTLFLAG_VNET | CTLFLAG_RW,
131 	&VNET_NAME(arp_proxyall), 0,
132 	"Enable proxy ARP for all suitable requests");
133 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, wait, CTLFLAG_VNET | CTLFLAG_RW,
134 	&VNET_NAME(arpt_down), 0,
135 	"Incomplete ARP entry lifetime in seconds");
136 SYSCTL_VNET_PCPUSTAT(_net_link_ether_arp, OID_AUTO, stats, struct arpstat,
137     arpstat, "ARP statistics (struct arpstat, net/if_arp.h)");
138 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxhold, CTLFLAG_VNET | CTLFLAG_RW,
139 	&VNET_NAME(arp_maxhold), 0,
140 	"Number of packets to hold per ARP entry");
141 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_log_per_second,
142 	CTLFLAG_RW, &arp_maxpps, 0,
143 	"Maximum number of remotely triggered ARP messages that can be "
144 	"logged per second");
145 
146 /*
147  * Due to the exponential backoff algorithm used for the interval between GARP
148  * retransmissions, the maximum number of retransmissions is limited for
149  * sanity. This limit corresponds to a maximum interval between retransmissions
150  * of 2^16 seconds ~= 18 hours.
151  *
152  * Making this limit more dynamic is more complicated than worthwhile,
153  * especially since sending out GARPs spaced days apart would be of little
154  * use. A maximum dynamic limit would look something like:
155  *
156  * const int max = fls(INT_MAX / hz) - 1;
157  */
158 #define MAX_GARP_RETRANSMITS 16
159 static int sysctl_garp_rexmit(SYSCTL_HANDLER_ARGS);
160 static int garp_rexmit_count = 0; /* GARP retransmission setting. */
161 
162 SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, garp_rexmit_count,
163     CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE,
164     &garp_rexmit_count, 0, sysctl_garp_rexmit, "I",
165     "Number of times to retransmit GARP packets;"
166     " 0 to disable, maximum of 16");
167 
168 VNET_DEFINE_STATIC(int, arp_log_level) = LOG_INFO;	/* Min. log(9) level. */
169 #define	V_arp_log_level		VNET(arp_log_level)
170 SYSCTL_INT(_net_link_ether_arp, OID_AUTO, log_level, CTLFLAG_VNET | CTLFLAG_RW,
171 	&VNET_NAME(arp_log_level), 0,
172 	"Minimum log(9) level for recording rate-limited ARP log messages. "
173 	"Higher values log more (emerg=0, info=6 (default), debug=7).");
174 #define	ARP_LOG(pri, ...)	do {					\
175 	if ((pri) <= V_arp_log_level &&					\
176 	    ppsratecheck(&arp_lastlog, &arp_curpps, arp_maxpps))	\
177 		log((pri), "arp: " __VA_ARGS__);			\
178 } while (0)
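/*
 * Example use (as in the input path below):
 *	ARP_LOG(LOG_NOTICE, "short packet received on %s\n", if_name(ifp));
 * The message is only emitted when LOG_NOTICE does not exceed
 * net.link.ether.arp.log_level and the rate limit (arp_maxpps) permits it.
 */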
179 
180 static void	arpintr(struct mbuf *);
181 static void	arptimer(void *);
182 #ifdef INET
183 static void	in_arpinput(struct mbuf *);
184 #endif
185 
186 static void arp_check_update_lle(struct arphdr *ah, struct in_addr isaddr,
187     struct ifnet *ifp, int bridged, struct llentry *la);
188 static void arp_mark_lle_reachable(struct llentry *la, struct ifnet *ifp);
189 static void arp_iflladdr(void *arg __unused, struct ifnet *ifp);
190 
191 static eventhandler_tag iflladdr_tag;
192 
193 static const struct netisr_handler arp_nh = {
194 	.nh_name = "arp",
195 	.nh_handler = arpintr,
196 	.nh_proto = NETISR_ARP,
197 	.nh_policy = NETISR_POLICY_SOURCE,
198 };
199 
200 /*
201  * Timeout routine.  Age arp_tab entries periodically.
202  */
203 static void
204 arptimer(void *arg)
205 {
206 	struct llentry *lle = (struct llentry *)arg;
207 	struct ifnet *ifp;
208 
209 	if (lle->la_flags & LLE_STATIC) {
210 		return;
211 	}
212 	LLE_WLOCK(lle);
213 	if (callout_pending(&lle->lle_timer)) {
214 		/*
215 		 * The treatment of active/pending is a bit odd here.
216 		 * If the pending bit is set, the callout was
217 		 * rescheduled before we ran, so we just want to
218 		 * bail: in that case callout_reset() returned 1 and
219 		 * our reference was already dropped by arpresolve()
220 		 * below.
221 		 *
222 		 * The active bit we ignore: if the callout was being
223 		 * stopped in ll_tablefree() while it was currently
224 		 * running, the stop would have returned 0, so that
225 		 * code did not delete the entry (the callout could
226 		 * not be stopped), and we therefore want to go
227 		 * through with the delete here now.
228 		 */
229 		LLE_WUNLOCK(lle);
230 		return;
231 	}
232 	ifp = lle->lle_tbl->llt_ifp;
233 	CURVNET_SET(ifp->if_vnet);
234 
235 	switch (lle->ln_state) {
236 	case ARP_LLINFO_REACHABLE:
237 
238 		/*
239 		 * Expiration time is approaching.
240 		 * Request usage feedback from the datapath.
241 		 * Change state and re-schedule ourselves.
242 		 */
243 		llentry_request_feedback(lle);
244 		lle->ln_state = ARP_LLINFO_VERIFY;
245 		callout_schedule(&lle->lle_timer, hz * V_arpt_rexmit);
246 		LLE_WUNLOCK(lle);
247 		CURVNET_RESTORE();
248 		return;
249 	case ARP_LLINFO_VERIFY:
250 		if (llentry_get_hittime(lle) > 0 && lle->la_preempt > 0) {
251 			/* Entry was used, issue refresh request */
252 			struct epoch_tracker et;
253 			struct in_addr dst;
254 
255 			dst = lle->r_l3addr.addr4;
256 			lle->la_preempt--;
257 			callout_schedule(&lle->lle_timer, hz * V_arpt_rexmit);
258 			LLE_WUNLOCK(lle);
259 			NET_EPOCH_ENTER(et);
260 			arprequest(ifp, NULL, &dst, NULL);
261 			NET_EPOCH_EXIT(et);
262 			CURVNET_RESTORE();
263 			return;
264 		}
265 		/* Nothing happened. Reschedule if not too late */
266 		if (lle->la_expire > time_uptime) {
267 			callout_schedule(&lle->lle_timer, hz * V_arpt_rexmit);
268 			LLE_WUNLOCK(lle);
269 			CURVNET_RESTORE();
270 			return;
271 		}
272 		break;
273 	case ARP_LLINFO_INCOMPLETE:
274 	case ARP_LLINFO_DELETED:
275 		break;
276 	}
277 
278 	if ((lle->la_flags & LLE_DELETED) == 0) {
279 		int evt;
280 
281 		if (lle->la_flags & LLE_VALID)
282 			evt = LLENTRY_EXPIRED;
283 		else
284 			evt = LLENTRY_TIMEDOUT;
285 		EVENTHANDLER_INVOKE(lle_event, lle, evt);
286 	}
287 
288 	callout_stop(&lle->lle_timer);
289 
290 	/* XXX: LOR avoidance. We still have ref on lle. */
291 	LLE_WUNLOCK(lle);
292 	IF_AFDATA_LOCK(ifp);
293 	LLE_WLOCK(lle);
294 
295 	/* Guard against race with other llentry_free(). */
296 	if (lle->la_flags & LLE_LINKED) {
297 		LLE_REMREF(lle);
298 		lltable_unlink_entry(lle->lle_tbl, lle);
299 	}
300 	IF_AFDATA_UNLOCK(ifp);
301 
302 	size_t pkts_dropped = llentry_free(lle);
303 
304 	ARPSTAT_ADD(dropped, pkts_dropped);
305 	ARPSTAT_INC(timeouts);
306 
307 	CURVNET_RESTORE();
308 }
309 
310 /*
311  * Stores the link-layer header for @ifp, in a format suitable for
312  * if_output(), into buffer @buf.  The resulting header length is
313  * stored in @bufsize.
314  * Returns 0 on success.
315  */
316 static int
317 arp_fillheader(struct ifnet *ifp, struct arphdr *ah, int bcast, u_char *buf,
318     size_t *bufsize)
319 {
320 	struct if_encap_req ereq;
321 	int error;
322 
323 	bzero(buf, *bufsize);
324 	bzero(&ereq, sizeof(ereq));
325 	ereq.buf = buf;
326 	ereq.bufsize = *bufsize;
327 	ereq.rtype = IFENCAP_LL;
328 	ereq.family = AF_ARP;
329 	ereq.lladdr = ar_tha(ah);
330 	ereq.hdata = (u_char *)ah;
331 	if (bcast)
332 		ereq.flags = IFENCAP_FLAG_BROADCAST;
333 	error = ifp->if_requestencap(ifp, &ereq);
334 	if (error == 0)
335 		*bufsize = ereq.bufsize;
336 
337 	return (error);
338 }
339 
340 /*
341  * Broadcast an ARP request. Caller specifies:
342  *	- arp header source ip address
343  *	- arp header target ip address
344  *	- arp header source ethernet address
345  */
346 static int
347 arprequest_internal(struct ifnet *ifp, const struct in_addr *sip,
348     const struct in_addr *tip, u_char *enaddr)
349 {
350 	struct mbuf *m;
351 	struct arphdr *ah;
352 	struct sockaddr sa;
353 	u_char *carpaddr = NULL;
354 	uint8_t linkhdr[LLE_MAX_LINKHDR];
355 	size_t linkhdrsize;
356 	struct route ro;
357 	int error;
358 
359 	NET_EPOCH_ASSERT();
360 
361 	if (sip == NULL) {
362 		/*
363 		 * The caller did not supply a source address, try to find
364 		 * a compatible one among those assigned to this interface.
365 		 */
366 		struct ifaddr *ifa;
367 
368 		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
369 			if (ifa->ifa_addr->sa_family != AF_INET)
370 				continue;
371 
372 			if (ifa->ifa_carp) {
373 				if ((*carp_iamatch_p)(ifa, &carpaddr) == 0)
374 					continue;
375 				sip = &IA_SIN(ifa)->sin_addr;
376 			} else {
377 				carpaddr = NULL;
378 				sip = &IA_SIN(ifa)->sin_addr;
379 			}
380 
381 			if (0 == ((sip->s_addr ^ tip->s_addr) &
382 			    IA_MASKSIN(ifa)->sin_addr.s_addr))
383 				break;  /* found it. */
384 		}
385 		if (sip == NULL) {
386 			printf("%s: cannot find matching address\n", __func__);
387 			return (EADDRNOTAVAIL);
388 		}
389 	}
390 	if (enaddr == NULL)
391 		enaddr = carpaddr ? carpaddr : (u_char *)IF_LLADDR(ifp);
392 
393 	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
394 		return (ENOMEM);
395 	m->m_len = sizeof(*ah) + 2 * sizeof(struct in_addr) +
396 		2 * ifp->if_addrlen;
397 	m->m_pkthdr.len = m->m_len;
398 	M_ALIGN(m, m->m_len);
399 	ah = mtod(m, struct arphdr *);
400 	bzero((caddr_t)ah, m->m_len);
401 #ifdef MAC
402 	mac_netinet_arp_send(ifp, m);
403 #endif
404 	ah->ar_pro = htons(ETHERTYPE_IP);
405 	ah->ar_hln = ifp->if_addrlen;		/* hardware address length */
406 	ah->ar_pln = sizeof(struct in_addr);	/* protocol address length */
407 	ah->ar_op = htons(ARPOP_REQUEST);
408 	bcopy(enaddr, ar_sha(ah), ah->ar_hln);
409 	bcopy(sip, ar_spa(ah), ah->ar_pln);
410 	bcopy(tip, ar_tpa(ah), ah->ar_pln);
411 	sa.sa_family = AF_ARP;
412 	sa.sa_len = 2;
413 
414 	/* Calculate link header for sending frame */
415 	bzero(&ro, sizeof(ro));
416 	linkhdrsize = sizeof(linkhdr);
417 	error = arp_fillheader(ifp, ah, 1, linkhdr, &linkhdrsize);
418 	if (error != 0 && error != EAFNOSUPPORT) {
419 		m_freem(m);
420 		ARP_LOG(LOG_ERR, "Failed to calculate ARP header on %s: %d\n",
421 		    if_name(ifp), error);
422 		return (error);
423 	}
424 
425 	ro.ro_prepend = linkhdr;
426 	ro.ro_plen = linkhdrsize;
427 	ro.ro_flags = 0;
428 
429 	m->m_flags |= M_BCAST;
430 	m_clrprotoflags(m);	/* Avoid confusing lower layers. */
431 	error = (*ifp->if_output)(ifp, m, &sa, &ro);
432 	ARPSTAT_INC(txrequests);
433 	if (error) {
434 		ARPSTAT_INC(txerrors);
435 		ARP_LOG(LOG_DEBUG, "Failed to send ARP packet on %s: %d\n",
436 		    if_name(ifp), error);
437 	}
438 	return (error);
439 }
440 
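/*
 * Exported wrapper around arprequest_internal() that discards any error
 * returned by the output path.
 */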
441 void
442 arprequest(struct ifnet *ifp, const struct in_addr *sip,
443     const struct in_addr *tip, u_char *enaddr)
444 {
445 
446 	(void) arprequest_internal(ifp, sip, tip, enaddr);
447 }
448 
449 /*
450  * Resolve an IP address into an ethernet address - heavy version.
451  * Used internally by arpresolve().
452  * We have already checked that we can't use an existing lle without
453  * modification so we have to acquire an LLE_EXCLUSIVE lle lock.
454  *
455  * On success, desten and pflags are filled in and the function returns 0.
456  * If the packet must be held pending resolution, we return EWOULDBLOCK.
457  * On other errors, we return the corresponding error code.
458  * Note that m_freem() handles NULL.
459  */
460 static int
461 arpresolve_full(struct ifnet *ifp, int is_gw, int flags, struct mbuf *m,
462 	const struct sockaddr *dst, u_char *desten, uint32_t *pflags,
463 	struct llentry **plle)
464 {
465 	struct llentry *la = NULL, *la_tmp;
466 	int error, renew;
467 	char *lladdr;
468 	int ll_len;
469 
470 	NET_EPOCH_ASSERT();
471 
472 	if (pflags != NULL)
473 		*pflags = 0;
474 	if (plle != NULL)
475 		*plle = NULL;
476 
477 	if ((flags & LLE_CREATE) == 0)
478 		la = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
479 	if (la == NULL && (ifp->if_flags & (IFF_NOARP | IFF_STATICARP)) == 0) {
480 		la = lltable_alloc_entry(LLTABLE(ifp), 0, dst);
481 		if (la == NULL) {
482 			char addrbuf[INET_ADDRSTRLEN];
483 
484 			log(LOG_DEBUG,
485 			    "arpresolve: can't allocate llinfo for %s on %s\n",
486 			    inet_ntoa_r(SIN(dst)->sin_addr, addrbuf),
487 			    if_name(ifp));
488 			m_freem(m);
489 			return (EINVAL);
490 		}
491 
492 		IF_AFDATA_WLOCK(ifp);
493 		LLE_WLOCK(la);
494 		la_tmp = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
495 		/* Prefer ANY existing lle over newly-created one */
496 		if (la_tmp == NULL)
497 			lltable_link_entry(LLTABLE(ifp), la);
498 		IF_AFDATA_WUNLOCK(ifp);
499 		if (la_tmp != NULL) {
500 			lltable_free_entry(LLTABLE(ifp), la);
501 			la = la_tmp;
502 		}
503 	}
504 	if (la == NULL) {
505 		m_freem(m);
506 		return (EINVAL);
507 	}
508 
509 	if ((la->la_flags & LLE_VALID) &&
510 	    ((la->la_flags & LLE_STATIC) || la->la_expire > time_uptime)) {
511 		if (flags & LLE_ADDRONLY) {
512 			lladdr = la->ll_addr;
513 			ll_len = ifp->if_addrlen;
514 		} else {
515 			lladdr = la->r_linkdata;
516 			ll_len = la->r_hdrlen;
517 		}
518 		bcopy(lladdr, desten, ll_len);
519 
520 		/* Notify LLE code that the entry was used by datapath */
521 		llentry_provide_feedback(la);
522 		if (pflags != NULL)
523 			*pflags = la->la_flags & (LLE_VALID|LLE_IFADDR);
524 		if (plle) {
525 			LLE_ADDREF(la);
526 			*plle = la;
527 		}
528 		LLE_WUNLOCK(la);
529 		return (0);
530 	}
531 
532 	renew = (la->la_asked == 0 || la->la_expire != time_uptime);
533 
534 	/*
535 	 * There is an arptab entry, but no ethernet address
536 	 * response yet.  Add the mbuf to the list, dropping
537 	 * the oldest packet if we have exceeded the system
538 	 * setting.
539 	 */
540 	if (m != NULL) {
541 		size_t dropped = lltable_append_entry_queue(la, m, V_arp_maxhold);
542 		ARPSTAT_ADD(dropped, dropped);
543 	}
544 
545 	/*
546 	 * Return EWOULDBLOCK if we have tried fewer than arp_maxtries times.
547 	 * It will be masked by ether_output(). Return EHOSTDOWN/EHOSTUNREACH
548 	 * if we have already sent arp_maxtries ARP requests. Retransmit the
549 	 * ARP request, but not faster than one request per second.
550 	 */
551 	if (la->la_asked < V_arp_maxtries)
552 		error = EWOULDBLOCK;	/* First request. */
553 	else
554 		error = is_gw != 0 ? EHOSTUNREACH : EHOSTDOWN;
555 
556 	if (renew) {
557 		int canceled, e;
558 
559 		LLE_ADDREF(la);
560 		la->la_expire = time_uptime;
561 		canceled = callout_reset(&la->lle_timer, hz * V_arpt_down,
562 		    arptimer, la);
563 		if (canceled)
564 			LLE_REMREF(la);
565 		la->la_asked++;
566 		LLE_WUNLOCK(la);
567 		e = arprequest_internal(ifp, NULL, &SIN(dst)->sin_addr, NULL);
568 		/*
569 		 * Only overwrite 'error' in case of error; in case of success
570 		 * the proper return value was already set above.
571 		 */
572 		if (e != 0)
573 			return (e);
574 		return (error);
575 	}
576 
577 	LLE_WUNLOCK(la);
578 	return (error);
579 }
580 
581 /*
582  * Looks up the link header based on an IP address.
583  * On input:
584  *    ifp is the interface we use
585  *    is_gw != 0 if @dst represents a gateway to some destination
586  *    m is the mbuf. May be NULL if we don't have a packet.
587  *    dst is the next hop.
588  *    desten is the storage to put the LL header in.
589  *    flags returns a subset of the lle flags: LLE_VALID | LLE_IFADDR
590  *
591  * On success, the full/partial link header and flags are filled in and
592  * the function returns 0.
593  * If the packet must be held pending resolution, we return EWOULDBLOCK.
594  * On other errors, we return the corresponding error code.
595  * Note that m_freem() handles NULL.
596  */
597 int
598 arpresolve(struct ifnet *ifp, int is_gw, struct mbuf *m,
599 	const struct sockaddr *dst, u_char *desten, uint32_t *pflags,
600 	struct llentry **plle)
601 {
602 	struct llentry *la = NULL;
603 
604 	NET_EPOCH_ASSERT();
605 
606 	if (pflags != NULL)
607 		*pflags = 0;
608 	if (plle != NULL)
609 		*plle = NULL;
610 
611 	if (m != NULL) {
612 		if (m->m_flags & M_BCAST) {
613 			/* broadcast */
614 			(void)memcpy(desten,
615 			    ifp->if_broadcastaddr, ifp->if_addrlen);
616 			return (0);
617 		}
618 		if (m->m_flags & M_MCAST) {
619 			/* multicast */
620 			ETHER_MAP_IP_MULTICAST(&SIN(dst)->sin_addr, desten);
621 			return (0);
622 		}
623 	}
624 
625 	la = lla_lookup(LLTABLE(ifp), plle ? LLE_EXCLUSIVE : LLE_UNLOCKED, dst);
626 	if (la != NULL && (la->r_flags & RLLE_VALID) != 0) {
627 		/* Entry found, let's copy lle info */
628 		bcopy(la->r_linkdata, desten, la->r_hdrlen);
629 		if (pflags != NULL)
630 			*pflags = LLE_VALID | (la->r_flags & RLLE_IFADDR);
631 		/* Notify the LLE handling code that the entry was used. */
632 		llentry_provide_feedback(la);
633 		if (plle) {
634 			LLE_ADDREF(la);
635 			*plle = la;
636 			LLE_WUNLOCK(la);
637 		}
638 		return (0);
639 	}
640 	if (plle && la)
641 		LLE_WUNLOCK(la);
642 
643 	return (arpresolve_full(ifp, is_gw, la == NULL ? LLE_CREATE : 0, m, dst,
644 	    desten, pflags, plle));
645 }
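/*
 * Note: the RLLE_VALID case above is the fast path taken by the link-layer
 * output code (e.g. ether_output()); arpresolve_full() is entered only when
 * no valid cached entry exists for the next hop.
 */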
646 
647 /*
648  * Common length and type checks are done here,
649  * then the protocol-specific routine is called.
650  */
651 static void
652 arpintr(struct mbuf *m)
653 {
654 	struct arphdr *ar;
655 	struct ifnet *ifp;
656 	char *layer;
657 	int hlen;
658 
659 	ifp = m->m_pkthdr.rcvif;
660 
661 	if (m->m_len < sizeof(struct arphdr) &&
662 	    ((m = m_pullup(m, sizeof(struct arphdr))) == NULL)) {
663 		ARP_LOG(LOG_NOTICE, "packet with short header received on %s\n",
664 		    if_name(ifp));
665 		return;
666 	}
667 	ar = mtod(m, struct arphdr *);
668 
669 	/* Check if length is sufficient */
670 	if (m->m_len <  arphdr_len(ar)) {
671 		m = m_pullup(m, arphdr_len(ar));
672 		if (m == NULL) {
673 			ARP_LOG(LOG_NOTICE, "short packet received on %s\n",
674 			    if_name(ifp));
675 			return;
676 		}
677 		ar = mtod(m, struct arphdr *);
678 	}
679 
680 	hlen = 0;
681 	layer = "";
682 	switch (ntohs(ar->ar_hrd)) {
683 	case ARPHRD_ETHER:
684 		hlen = ETHER_ADDR_LEN; /* RFC 826 */
685 		layer = "ethernet";
686 		break;
687 	case ARPHRD_INFINIBAND:
688 		hlen = 20;	/* RFC 4391, INFINIBAND_ALEN */
689 		layer = "infiniband";
690 		break;
691 	case ARPHRD_IEEE1394:
692 		hlen = 0; /* SHALL be 16 */ /* RFC 2734 */
693 		layer = "firewire";
694 
695 		/*
696 		 * Restrict hardware addresses that are too long.
697 		 * Currently we are capable of handling 20-byte
698 		 * addresses (sizeof(lle->ll_addr)).
699 		 */
700 		if (ar->ar_hln >= 20)
701 			hlen = 16;
702 		break;
703 	default:
704 		ARP_LOG(LOG_NOTICE,
705 		    "packet with unknown hardware format 0x%02x received on "
706 		    "%s\n", ntohs(ar->ar_hrd), if_name(ifp));
707 		m_freem(m);
708 		return;
709 	}
710 
711 	if (hlen != 0 && hlen != ar->ar_hln) {
712 		ARP_LOG(LOG_NOTICE,
713 		    "packet with invalid %s address length %d received on %s\n",
714 		    layer, ar->ar_hln, if_name(ifp));
715 		m_freem(m);
716 		return;
717 	}
718 
719 	ARPSTAT_INC(received);
720 	switch (ntohs(ar->ar_pro)) {
721 #ifdef INET
722 	case ETHERTYPE_IP:
723 		in_arpinput(m);
724 		return;
725 #endif
726 	}
727 	m_freem(m);
728 }
729 
730 #ifdef INET
731 /*
732  * ARP for Internet protocols on 10 Mb/s Ethernet.
733  * Algorithm is that given in RFC 826.
734  * In addition, a sanity check is performed on the sender
735  * protocol address, to catch impersonators.
736  * We no longer handle negotiations for use of trailer protocol:
737  * Formerly, ARP replied for protocol type ETHERTYPE_TRAIL sent
738  * along with IP replies if we wanted trailers sent to us,
739  * and also sent them in response to IP replies.
740  * This allowed either end to announce the desire to receive
741  * trailer packets.
742  * We no longer reply to requests for ETHERTYPE_TRAIL protocol either,
743  * but formerly didn't normally send requests.
744  */
745 static int log_arp_wrong_iface = 1;
746 static int log_arp_movements = 1;
747 static int log_arp_permanent_modify = 1;
748 static int allow_multicast = 0;
749 
750 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_wrong_iface, CTLFLAG_RW,
751 	&log_arp_wrong_iface, 0,
752 	"log arp packets arriving on the wrong interface");
753 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_movements, CTLFLAG_RW,
754 	&log_arp_movements, 0,
755 	"log arp replies from MACs different than the one in the cache");
756 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_permanent_modify, CTLFLAG_RW,
757 	&log_arp_permanent_modify, 0,
758 	"log arp replies from MACs different than the one in the permanent arp entry");
759 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, allow_multicast, CTLFLAG_RW,
760 	&allow_multicast, 0, "accept multicast addresses");
761 
762 static void
763 in_arpinput(struct mbuf *m)
764 {
765 	struct arphdr *ah;
766 	struct ifnet *ifp = m->m_pkthdr.rcvif;
767 	struct llentry *la = NULL, *la_tmp;
768 	struct ifaddr *ifa;
769 	struct in_ifaddr *ia;
770 	struct sockaddr sa;
771 	struct in_addr isaddr, itaddr, myaddr;
772 	u_int8_t *enaddr = NULL;
773 	int op;
774 	int bridged = 0, is_bridge = 0;
775 	int carped;
776 	struct sockaddr_in sin;
777 	struct sockaddr *dst;
778 	struct nhop_object *nh;
779 	uint8_t linkhdr[LLE_MAX_LINKHDR];
780 	struct route ro;
781 	size_t linkhdrsize;
782 	int lladdr_off;
783 	int error;
784 	char addrbuf[INET_ADDRSTRLEN];
785 
786 	NET_EPOCH_ASSERT();
787 
788 	sin.sin_len = sizeof(struct sockaddr_in);
789 	sin.sin_family = AF_INET;
790 	sin.sin_addr.s_addr = 0;
791 
792 	if (ifp->if_bridge)
793 		bridged = 1;
794 	if (ifp->if_type == IFT_BRIDGE)
795 		is_bridge = 1;
796 
797 	/*
798 	 * We have already checked that the mbuf contains enough contiguous
799 	 * data to hold the entire ARP message, according to the ARP header.
800 	 */
801 	ah = mtod(m, struct arphdr *);
802 
803 	/*
804 	 * ARP is only for IPv4 so we can reject packets with
805 	 * a protocol length not equal to an IPv4 address.
806 	 */
807 	if (ah->ar_pln != sizeof(struct in_addr)) {
808 		ARP_LOG(LOG_NOTICE, "requested protocol length != %zu\n",
809 		    sizeof(struct in_addr));
810 		goto drop;
811 	}
812 
813 	if (allow_multicast == 0 && ETHER_IS_MULTICAST(ar_sha(ah))) {
814 		ARP_LOG(LOG_NOTICE, "%*D is multicast\n",
815 		    ifp->if_addrlen, (u_char *)ar_sha(ah), ":");
816 		goto drop;
817 	}
818 
819 	op = ntohs(ah->ar_op);
820 	(void)memcpy(&isaddr, ar_spa(ah), sizeof (isaddr));
821 	(void)memcpy(&itaddr, ar_tpa(ah), sizeof (itaddr));
822 
823 	if (op == ARPOP_REPLY)
824 		ARPSTAT_INC(rxreplies);
825 
826 	/*
827 	 * For a bridge, we want to check the address irrespective
828 	 * of the receive interface. (This will change slightly
829 	 * when we have clusters of interfaces).
830 	 */
831 	CK_LIST_FOREACH(ia, INADDR_HASH(itaddr.s_addr), ia_hash) {
832 		if (((bridged && ia->ia_ifp->if_bridge == ifp->if_bridge) ||
833 		    ia->ia_ifp == ifp) &&
834 		    itaddr.s_addr == ia->ia_addr.sin_addr.s_addr &&
835 		    (ia->ia_ifa.ifa_carp == NULL ||
836 		    (*carp_iamatch_p)(&ia->ia_ifa, &enaddr))) {
837 			ifa_ref(&ia->ia_ifa);
838 			goto match;
839 		}
840 	}
841 	CK_LIST_FOREACH(ia, INADDR_HASH(isaddr.s_addr), ia_hash)
842 		if (((bridged && ia->ia_ifp->if_bridge == ifp->if_bridge) ||
843 		    ia->ia_ifp == ifp) &&
844 		    isaddr.s_addr == ia->ia_addr.sin_addr.s_addr) {
845 			ifa_ref(&ia->ia_ifa);
846 			goto match;
847 		}
848 
849 #define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia)				\
850   (ia->ia_ifp->if_bridge == ifp->if_softc &&				\
851   !bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) &&	\
852   addr == ia->ia_addr.sin_addr.s_addr)
853 	/*
854 	 * Check the case where the bridge shares its MAC address with
855 	 * some of its children, so packets are claimed by the bridge
856 	 * itself (bridge_input() does it first), but they are really
857 	 * destined for the bridge member.
858 	 */
859 	if (is_bridge) {
860 		CK_LIST_FOREACH(ia, INADDR_HASH(itaddr.s_addr), ia_hash) {
861 			if (BDG_MEMBER_MATCHES_ARP(itaddr.s_addr, ifp, ia)) {
862 				ifa_ref(&ia->ia_ifa);
863 				ifp = ia->ia_ifp;
864 				goto match;
865 			}
866 		}
867 	}
868 #undef BDG_MEMBER_MATCHES_ARP
869 
870 	/*
871 	 * No match, use the first inet address on the receive interface
872 	 * as a dummy address for the rest of the function.
873 	 */
874 	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
875 		if (ifa->ifa_addr->sa_family == AF_INET &&
876 		    (ifa->ifa_carp == NULL ||
877 		    (*carp_iamatch_p)(ifa, &enaddr))) {
878 			ia = ifatoia(ifa);
879 			ifa_ref(ifa);
880 			goto match;
881 		}
882 
883 	/*
884 	 * If bridging, fall back to using any inet address.
885 	 */
886 	if (!bridged || (ia = CK_STAILQ_FIRST(&V_in_ifaddrhead)) == NULL)
887 		goto drop;
888 	ifa_ref(&ia->ia_ifa);
889 match:
890 	if (!enaddr)
891 		enaddr = (u_int8_t *)IF_LLADDR(ifp);
892 	carped = (ia->ia_ifa.ifa_carp != NULL);
893 	myaddr = ia->ia_addr.sin_addr;
894 	ifa_free(&ia->ia_ifa);
895 	if (!bcmp(ar_sha(ah), enaddr, ifp->if_addrlen))
896 		goto drop;	/* it's from me, ignore it. */
897 	if (!bcmp(ar_sha(ah), ifp->if_broadcastaddr, ifp->if_addrlen)) {
898 		ARP_LOG(LOG_NOTICE, "link address is broadcast for IP address "
899 		    "%s!\n", inet_ntoa_r(isaddr, addrbuf));
900 		goto drop;
901 	}
902 
903 	if (ifp->if_addrlen != ah->ar_hln) {
904 		ARP_LOG(LOG_WARNING, "from %*D: addr len: new %d, "
905 		    "i/f %d (ignored)\n", ifp->if_addrlen,
906 		    (u_char *) ar_sha(ah), ":", ah->ar_hln,
907 		    ifp->if_addrlen);
908 		goto drop;
909 	}
910 
911 	/*
912 	 * Warn if another host is using the same IP address, but only if the
913 	 * IP address isn't 0.0.0.0, which is used for DHCP only, in which
914 	 * case we suppress the warning to avoid false positive complaints of
915 	 * potential misconfiguration.
916 	 */
917 	if (!bridged && !carped && isaddr.s_addr == myaddr.s_addr &&
918 	    myaddr.s_addr != 0) {
919 		ARP_LOG(LOG_ERR, "%*D is using my IP address %s on %s!\n",
920 		   ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
921 		   inet_ntoa_r(isaddr, addrbuf), ifp->if_xname);
922 		itaddr = myaddr;
923 		ARPSTAT_INC(dupips);
924 		goto reply;
925 	}
926 	if (ifp->if_flags & IFF_STATICARP)
927 		goto reply;
928 
929 	bzero(&sin, sizeof(sin));
930 	sin.sin_len = sizeof(struct sockaddr_in);
931 	sin.sin_family = AF_INET;
932 	sin.sin_addr = isaddr;
933 	dst = (struct sockaddr *)&sin;
934 	la = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
935 	if (la != NULL)
936 		arp_check_update_lle(ah, isaddr, ifp, bridged, la);
937 	else if (itaddr.s_addr == myaddr.s_addr) {
938 		/*
939 		 * Request/reply to our address, but no lle exists yet.
940 		 * Calculate full link prepend to use in lle.
941 		 */
942 		linkhdrsize = sizeof(linkhdr);
943 		if (lltable_calc_llheader(ifp, AF_INET, ar_sha(ah), linkhdr,
944 		    &linkhdrsize, &lladdr_off) != 0)
945 			goto reply;
946 
947 		/* Allocate new entry */
948 		la = lltable_alloc_entry(LLTABLE(ifp), 0, dst);
949 		if (la == NULL) {
950 			/*
951 			 * lle creation may fail if the source address
952 			 * belongs to a subnet that is not directly
953 			 * connected. However, we will try to answer the
954 			 * request instead of dropping the frame.
955 			 */
956 			goto reply;
957 		}
958 		lltable_set_entry_addr(ifp, la, linkhdr, linkhdrsize,
959 		    lladdr_off);
960 
961 		IF_AFDATA_WLOCK(ifp);
962 		LLE_WLOCK(la);
963 		la_tmp = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
964 
965 		/*
966 		 * Check if the lle still does not exist.
967 		 * If it does exist, that means that we either
968 		 * 1) have configured it explicitly, via
969 		 * 1a) an 'arp -s' static entry or
970 		 * 1b) an interface address static record,
971 		 * or
972 		 * 2) it was the result of sending the first packet to
973 		 * the host, or
974 		 * 3) it was another ARP reply packet that we handled
975 		 * in a different thread.
976 		 *
977 		 * In all cases except 3) we definitely need to prefer
978 		 * the existing lle. For the sake of simplicity, prefer
979 		 * any existing lle over the newly-created one.
980 		 */
981 		if (la_tmp == NULL)
982 			lltable_link_entry(LLTABLE(ifp), la);
983 		IF_AFDATA_WUNLOCK(ifp);
984 
985 		if (la_tmp == NULL) {
986 			arp_mark_lle_reachable(la, ifp);
987 			LLE_WUNLOCK(la);
988 		} else {
989 			/* Free the newly-created entry and handle the packet */
990 			lltable_free_entry(LLTABLE(ifp), la);
991 			la = la_tmp;
992 			la_tmp = NULL;
993 			arp_check_update_lle(ah, isaddr, ifp, bridged, la);
994 			/* arp_check_update_lle() returns @la unlocked */
995 		}
996 		la = NULL;
997 	}
998 reply:
999 	if (op != ARPOP_REQUEST)
1000 		goto drop;
1001 	ARPSTAT_INC(rxrequests);
1002 
1003 	if (itaddr.s_addr == myaddr.s_addr) {
1004 		/* Shortcut: the receiving interface is the target. */
1005 		(void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
1006 		(void)memcpy(ar_sha(ah), enaddr, ah->ar_hln);
1007 	} else {
1008 		/*
1009 		 * Destination address is not ours. Check if
1010 		 * proxyarp entry exists or proxyarp is turned on globally.
1011 		 */
1012 		struct llentry *lle;
1013 
1014 		sin.sin_addr = itaddr;
1015 		lle = lla_lookup(LLTABLE(ifp), 0, (struct sockaddr *)&sin);
1016 
1017 		if ((lle != NULL) && (lle->la_flags & LLE_PUB)) {
1018 			(void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
1019 			(void)memcpy(ar_sha(ah), lle->ll_addr, ah->ar_hln);
1020 			LLE_RUNLOCK(lle);
1021 		} else {
1022 			if (lle != NULL)
1023 				LLE_RUNLOCK(lle);
1024 
1025 			if (!V_arp_proxyall)
1026 				goto drop;
1027 
1028 			NET_EPOCH_ASSERT();
1029 			nh = fib4_lookup(ifp->if_fib, itaddr, 0, 0, 0);
1030 			if (nh == NULL)
1031 				goto drop;
1032 
1033 			/*
1034 			 * Don't send proxy replies for nodes on the same
1035 			 * interface this request came in on, or we'll get
1036 			 * into a fight over who claims what Ether address.
1037 			 */
1038 			if (nh->nh_ifp == ifp)
1039 				goto drop;
1040 
1041 			(void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
1042 			(void)memcpy(ar_sha(ah), enaddr, ah->ar_hln);
1043 
1044 			/*
1045 			 * Also check that the node which sent the ARP packet
1046 			 * is on the interface we expect it to be on. This
1047 			 * avoids ARP chaos if an interface is connected to the
1048 			 * wrong network.
1049 			 */
1050 
1051 			nh = fib4_lookup(ifp->if_fib, isaddr, 0, 0, 0);
1052 			if (nh == NULL)
1053 				goto drop;
1054 			if (nh->nh_ifp != ifp) {
1055 				ARP_LOG(LOG_INFO, "proxy: ignoring request"
1056 				    " from %s via %s\n",
1057 				    inet_ntoa_r(isaddr, addrbuf),
1058 				    ifp->if_xname);
1059 				goto drop;
1060 			}
1061 
1062 #ifdef DEBUG_PROXY
1063 			printf("arp: proxying for %s\n",
1064 			    inet_ntoa_r(itaddr, addrbuf));
1065 #endif
1066 		}
1067 	}
1068 
1069 	if (itaddr.s_addr == myaddr.s_addr &&
1070 	    IN_LINKLOCAL(ntohl(itaddr.s_addr))) {
1071 		/* RFC 3927 link-local IPv4; always reply by broadcast. */
1072 #ifdef DEBUG_LINKLOCAL
1073 		printf("arp: sending reply for link-local addr %s\n",
1074 		    inet_ntoa_r(itaddr, addrbuf));
1075 #endif
1076 		m->m_flags |= M_BCAST;
1077 		m->m_flags &= ~M_MCAST;
1078 	} else {
1079 		/* default behaviour; never reply by broadcast. */
1080 		m->m_flags &= ~(M_BCAST|M_MCAST);
1081 	}
1082 	(void)memcpy(ar_tpa(ah), ar_spa(ah), ah->ar_pln);
1083 	(void)memcpy(ar_spa(ah), &itaddr, ah->ar_pln);
1084 	ah->ar_op = htons(ARPOP_REPLY);
1085 	ah->ar_pro = htons(ETHERTYPE_IP); /* let's be sure! */
1086 	m->m_len = sizeof(*ah) + (2 * ah->ar_pln) + (2 * ah->ar_hln);
1087 	m->m_pkthdr.len = m->m_len;
1088 	m->m_pkthdr.rcvif = NULL;
1089 	sa.sa_family = AF_ARP;
1090 	sa.sa_len = 2;
1091 
1092 	/* Calculate link header for sending frame */
1093 	bzero(&ro, sizeof(ro));
1094 	linkhdrsize = sizeof(linkhdr);
1095 	error = arp_fillheader(ifp, ah, 0, linkhdr, &linkhdrsize);
1096 
1097 	/*
1098 	 * arp_fillheader() may fail due to lack of support in the encap
1099 	 * request handling. This is not necessarily an error; AF_ARP
1100 	 * can/should be handled by if_output().
1101 	 */
1102 	if (error != 0 && error != EAFNOSUPPORT) {
1103 		ARP_LOG(LOG_ERR, "Failed to calculate ARP header on %s: %d\n",
1104 		    if_name(ifp), error);
1105 		goto drop;
1106 	}
1107 
1108 	ro.ro_prepend = linkhdr;
1109 	ro.ro_plen = linkhdrsize;
1110 	ro.ro_flags = 0;
1111 
1112 	m_clrprotoflags(m);	/* Avoid confusing lower layers. */
1113 	(*ifp->if_output)(ifp, m, &sa, &ro);
1114 	ARPSTAT_INC(txreplies);
1115 	return;
1116 
1117 drop:
1118 	m_freem(m);
1119 }
1120 #endif
1121 
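/*
 * Detaches and returns the chain of packets that were queued on @la while
 * it was being resolved.  The caller takes over the returned mbuf chain.
 */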
1122 static struct mbuf *
1123 arp_grab_holdchain(struct llentry *la)
1124 {
1125 	struct mbuf *chain;
1126 
1127 	LLE_WLOCK_ASSERT(la);
1128 
1129 	chain = la->la_hold;
1130 	la->la_hold = NULL;
1131 	la->la_numheld = 0;
1132 
1133 	return (chain);
1134 }
1135 
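/*
 * Transmits every packet in @chain on @ifp, using the link-layer header
 * cached in @la as the prepend data.
 */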
1136 static void
1137 arp_flush_holdchain(struct ifnet *ifp, struct llentry *la, struct mbuf *chain)
1138 {
1139 	struct mbuf *m_hold, *m_hold_next;
1140 	struct sockaddr_in sin;
1141 
1142 	NET_EPOCH_ASSERT();
1143 
1144 	struct route ro = {
1145 		.ro_prepend = la->r_linkdata,
1146 		.ro_plen = la->r_hdrlen,
1147 	};
1148 
1149 	lltable_fill_sa_entry(la, (struct sockaddr *)&sin);
1150 
1151 	for (m_hold = chain; m_hold != NULL; m_hold = m_hold_next) {
1152 		m_hold_next = m_hold->m_nextpkt;
1153 		m_hold->m_nextpkt = NULL;
1154 		/* Avoid confusing lower layers. */
1155 		m_clrprotoflags(m_hold);
1156 		(*ifp->if_output)(ifp, m_hold, (struct sockaddr *)&sin, &ro);
1157 	}
1158 }
1159 
1160 /*
1161  * Checks the received ARP data against the existing @la.
1162  * Updates the lle state and performs notification if necessary.
1163  */
1164 static void
1165 arp_check_update_lle(struct arphdr *ah, struct in_addr isaddr, struct ifnet *ifp,
1166     int bridged, struct llentry *la)
1167 {
1168 	uint8_t linkhdr[LLE_MAX_LINKHDR];
1169 	size_t linkhdrsize;
1170 	int lladdr_off;
1171 	char addrbuf[INET_ADDRSTRLEN];
1172 
1173 	LLE_WLOCK_ASSERT(la);
1174 
1175 	/* the following is not an error when doing bridging */
1176 	if (!bridged && la->lle_tbl->llt_ifp != ifp) {
1177 		if (log_arp_wrong_iface)
1178 			ARP_LOG(LOG_WARNING, "%s is on %s "
1179 			    "but got reply from %*D on %s\n",
1180 			    inet_ntoa_r(isaddr, addrbuf),
1181 			    la->lle_tbl->llt_ifp->if_xname,
1182 			    ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
1183 			    ifp->if_xname);
1184 		LLE_WUNLOCK(la);
1185 		return;
1186 	}
1187 	if ((la->la_flags & LLE_VALID) &&
1188 	    bcmp(ar_sha(ah), la->ll_addr, ifp->if_addrlen)) {
1189 		if (la->la_flags & LLE_STATIC) {
1190 			LLE_WUNLOCK(la);
1191 			if (log_arp_permanent_modify)
1192 				ARP_LOG(LOG_ERR,
1193 				    "%*D attempts to modify "
1194 				    "permanent entry for %s on %s\n",
1195 				    ifp->if_addrlen,
1196 				    (u_char *)ar_sha(ah), ":",
1197 				    inet_ntoa_r(isaddr, addrbuf),
1198 				    ifp->if_xname);
1199 			return;
1200 		}
1201 		if (log_arp_movements) {
1202 			ARP_LOG(LOG_INFO, "%s moved from %*D "
1203 			    "to %*D on %s\n",
1204 			    inet_ntoa_r(isaddr, addrbuf),
1205 			    ifp->if_addrlen,
1206 			    (u_char *)la->ll_addr, ":",
1207 			    ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
1208 			    ifp->if_xname);
1209 		}
1210 	}
1211 
1212 	/* Calculate full link prepend to use in lle */
1213 	linkhdrsize = sizeof(linkhdr);
1214 	if (lltable_calc_llheader(ifp, AF_INET, ar_sha(ah), linkhdr,
1215 	    &linkhdrsize, &lladdr_off) != 0) {
1216 		LLE_WUNLOCK(la);
1217 		return;
1218 	}
1219 
1220 	/* Check if something has changed */
1221 	if (memcmp(la->r_linkdata, linkhdr, linkhdrsize) != 0 ||
1222 	    (la->la_flags & LLE_VALID) == 0) {
1223 		/* Try to perform LLE update */
1224 		if (lltable_try_set_entry_addr(ifp, la, linkhdr, linkhdrsize,
1225 		    lladdr_off) == 0) {
1226 			LLE_WUNLOCK(la);
1227 			return;
1228 		}
1229 
1230 		/* Clear fast path feedback request if set */
1231 		llentry_mark_used(la);
1232 	}
1233 
1234 	arp_mark_lle_reachable(la, ifp);
1235 
1236 	/*
1237 	 * The packets are all freed within the call to the output
1238 	 * routine.
1239 	 *
1240 	 * NB: The lock MUST be released before the call to the
1241 	 * output routine.
1242 	 */
1243 	if (la->la_hold != NULL) {
1244 		struct mbuf *chain;
1245 
1246 		chain = arp_grab_holdchain(la);
1247 		LLE_WUNLOCK(la);
1248 		arp_flush_holdchain(ifp, la, chain);
1249 	} else
1250 		LLE_WUNLOCK(la);
1251 }
1252 
1253 static void
1254 arp_mark_lle_reachable(struct llentry *la, struct ifnet *ifp)
1255 {
1256 	int canceled, wtime;
1257 
1258 	LLE_WLOCK_ASSERT(la);
1259 
1260 	la->ln_state = ARP_LLINFO_REACHABLE;
1261 	EVENTHANDLER_INVOKE(lle_event, la, LLENTRY_RESOLVED);
1262 
1263 	if ((ifp->if_flags & IFF_STICKYARP) != 0)
1264 		la->la_flags |= LLE_STATIC;
1265 
1266 	if (!(la->la_flags & LLE_STATIC)) {
1267 		LLE_ADDREF(la);
1268 		la->la_expire = time_uptime + V_arpt_keep;
1269 		wtime = V_arpt_keep - V_arp_maxtries * V_arpt_rexmit;
1270 		if (wtime < 0)
1271 			wtime = V_arpt_keep;
1272 		canceled = callout_reset(&la->lle_timer,
1273 		    hz * wtime, arptimer, la);
1274 		if (canceled)
1275 			LLE_REMREF(la);
1276 	}
1277 	la->la_asked = 0;
1278 	la->la_preempt = V_arp_maxtries;
1279 }
1280 
1281 /*
1282  * Add permanent link-layer record for given interface address.
1283  */
1284 static __noinline void
1285 arp_add_ifa_lle(struct ifnet *ifp, const struct sockaddr *dst)
1286 {
1287 	struct llentry *lle, *lle_tmp;
1288 
1289 	/*
1290 	 * The interface address LLE record is considered static
1291 	 * because kernel code relies on the LLE_STATIC flag to check
1292 	 * whether these entries can be rewritten by ARP updates.
1293 	 */
1294 	lle = lltable_alloc_entry(LLTABLE(ifp), LLE_IFADDR | LLE_STATIC, dst);
1295 	if (lle == NULL) {
1296 		log(LOG_INFO, "arp_ifinit: cannot create arp "
1297 		    "entry for interface address\n");
1298 		return;
1299 	}
1300 
1301 	IF_AFDATA_WLOCK(ifp);
1302 	LLE_WLOCK(lle);
1303 	/* Unlink the existing entry, if any */
1304 	lle_tmp = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
1305 	if (lle_tmp != NULL)
1306 		lltable_unlink_entry(LLTABLE(ifp), lle_tmp);
1307 
1308 	lltable_link_entry(LLTABLE(ifp), lle);
1309 	IF_AFDATA_WUNLOCK(ifp);
1310 
1311 	if (lle_tmp != NULL)
1312 		EVENTHANDLER_INVOKE(lle_event, lle_tmp, LLENTRY_EXPIRED);
1313 
1314 	EVENTHANDLER_INVOKE(lle_event, lle, LLENTRY_RESOLVED);
1315 	LLE_WUNLOCK(lle);
1316 	if (lle_tmp != NULL)
1317 		lltable_free_entry(LLTABLE(ifp), lle_tmp);
1318 }
1319 
1320 /*
1321  * Handle the garp_rexmit_count. Like sysctl_handle_int(), but limits the range
1322  * of valid values.
1323  */
1324 static int
1325 sysctl_garp_rexmit(SYSCTL_HANDLER_ARGS)
1326 {
1327 	int error;
1328 	int rexmit_count = *(int *)arg1;
1329 
1330 	error = sysctl_handle_int(oidp, &rexmit_count, 0, req);
1331 
1332 	/* Enforce limits on any new value that may have been set. */
1333 	if (!error && req->newptr) {
1334 		/* A new value was set. */
1335 		if (rexmit_count < 0) {
1336 			rexmit_count = 0;
1337 		} else if (rexmit_count > MAX_GARP_RETRANSMITS) {
1338 			rexmit_count = MAX_GARP_RETRANSMITS;
1339 		}
1340 		*(int *)arg1 = rexmit_count;
1341 	}
1342 
1343 	return (error);
1344 }
1345 
1346 /*
1347  * Retransmit a Gratuitous ARP (GARP) and, if necessary, schedule a callout to
1348  * retransmit it again. A pending callout owns a reference to the ifa.
1349  */
1350 static void
1351 garp_rexmit(void *arg)
1352 {
1353 	struct in_ifaddr *ia = arg;
1354 
1355 	if (callout_pending(&ia->ia_garp_timer) ||
1356 	    !callout_active(&ia->ia_garp_timer)) {
1357 		IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
1358 		ifa_free(&ia->ia_ifa);
1359 		return;
1360 	}
1361 
1362 	CURVNET_SET(ia->ia_ifa.ifa_ifp->if_vnet);
1363 
1364 	/*
1365 	 * Drop lock while the ARP request is generated.
1366 	 */
1367 	IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
1368 
1369 	arprequest(ia->ia_ifa.ifa_ifp, &IA_SIN(ia)->sin_addr,
1370 	    &IA_SIN(ia)->sin_addr, IF_LLADDR(ia->ia_ifa.ifa_ifp));
1371 
1372 	/*
1373 	 * Increment the count of retransmissions. If the count has reached the
1374 	 * maximum value, stop sending the GARP packets. Otherwise, schedule
1375 	 * the callout to retransmit another GARP packet.
1376 	 */
1377 	++ia->ia_garp_count;
1378 	if (ia->ia_garp_count >= garp_rexmit_count) {
1379 		ifa_free(&ia->ia_ifa);
1380 	} else {
1381 		int rescheduled;
1382 		IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
1383 		rescheduled = callout_reset(&ia->ia_garp_timer,
1384 		    (1 << ia->ia_garp_count) * hz,
1385 		    garp_rexmit, ia);
1386 		IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
1387 		if (rescheduled) {
1388 			ifa_free(&ia->ia_ifa);
1389 		}
1390 	}
1391 
1392 	CURVNET_RESTORE();
1393 }
1394 
1395 /*
1396  * Start the GARP retransmit timer.
1397  *
1398  * A single GARP is always transmitted when an IPv4 address is added
1399  * to an interface and that is usually sufficient. However, in some
1400  * circumstances, such as when a shared address is passed between
1401  * cluster nodes, this single GARP may occasionally be dropped or
1402  * lost. This can lead to neighbors on the network link working with a
1403  * stale ARP cache and sending packets destined for that address to
1404  * the node that previously owned the address, which may not respond.
1405  *
1406  * To avoid this situation, GARP retransmits can be enabled by setting
1407  * the net.link.ether.inet.garp_rexmit_count sysctl to a value greater
1408  * than zero. The setting represents the maximum number of
1409  * retransmissions. The interval between retransmissions is calculated
1410  * using an exponential backoff algorithm, doubling each time, so the
1411  * retransmission intervals are: {1, 2, 4, 8, 16, ...} (seconds).
1412  */
1413 static void
1414 garp_timer_start(struct ifaddr *ifa)
1415 {
1416 	struct in_ifaddr *ia = (struct in_ifaddr *) ifa;
1417 
1418 	IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
1419 	ia->ia_garp_count = 0;
1420 	if (callout_reset(&ia->ia_garp_timer, (1 << ia->ia_garp_count) * hz,
1421 	    garp_rexmit, ia) == 0) {
1422 		ifa_ref(ifa);
1423 	}
1424 	IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
1425 }
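/*
 * Example (illustrative value, not a default): setting
 *	sysctl net.link.ether.inet.garp_rexmit_count=3
 * makes garp_timer_start()/garp_rexmit() send three additional GARPs after
 * the initial announcement, spaced 1, 2 and 4 seconds apart.
 */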
1426 
1427 void
1428 arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
1429 {
1430 	struct epoch_tracker et;
1431 	const struct sockaddr_in *dst_in;
1432 	const struct sockaddr *dst;
1433 
1434 	if (ifa->ifa_carp != NULL)
1435 		return;
1436 
1437 	dst = ifa->ifa_addr;
1438 	dst_in = (const struct sockaddr_in *)dst;
1439 
1440 	if (ntohl(dst_in->sin_addr.s_addr) == INADDR_ANY)
1441 		return;
1442 	NET_EPOCH_ENTER(et);
1443 	arp_announce_ifaddr(ifp, dst_in->sin_addr, IF_LLADDR(ifp));
1444 	NET_EPOCH_EXIT(et);
1445 	if (garp_rexmit_count > 0) {
1446 		garp_timer_start(ifa);
1447 	}
1448 
1449 	arp_add_ifa_lle(ifp, dst);
1450 }
1451 
1452 void
1453 arp_announce_ifaddr(struct ifnet *ifp, struct in_addr addr, u_char *enaddr)
1454 {
1455 
1456 	if (ntohl(addr.s_addr) != INADDR_ANY)
1457 		arprequest(ifp, &addr, &addr, enaddr);
1458 }
1459 
1460 /*
1461  * Sends a gratuitous ARP for each IPv4 ifaddr to notify other
1462  * nodes about the address change.
1463  */
1464 static __noinline void
1465 arp_handle_ifllchange(struct ifnet *ifp)
1466 {
1467 	struct ifaddr *ifa;
1468 
1469 	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1470 		if (ifa->ifa_addr->sa_family == AF_INET)
1471 			arp_ifinit(ifp, ifa);
1472 	}
1473 }
1474 
1475 /*
1476  * A handler for interface link layer address change event.
1477  */
1478 static void
1479 arp_iflladdr(void *arg __unused, struct ifnet *ifp)
1480 {
1481 	/* if_bridge can update its lladdr during if_vmove(), after we've done
1482 	 * if_detach_internal()/dom_ifdetach(). */
1483 	if (ifp->if_afdata[AF_INET] == NULL)
1484 		return;
1485 
1486 	lltable_update_ifaddr(LLTABLE(ifp));
1487 
1488 	if ((ifp->if_flags & IFF_UP) != 0)
1489 		arp_handle_ifllchange(ifp);
1490 }
1491 
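/*
 * Per-vnet initialization: registers the ARP netisr handler and, from the
 * default vnet only, hooks the interface link-layer address change event.
 */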
1492 static void
1493 vnet_arp_init(void)
1494 {
1495 
1496 	if (IS_DEFAULT_VNET(curvnet)) {
1497 		netisr_register(&arp_nh);
1498 		iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
1499 		    arp_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
1500 	}
1501 #ifdef VIMAGE
1502 	else
1503 		netisr_register_vnet(&arp_nh);
1504 #endif
1505 }
1506 VNET_SYSINIT(vnet_arp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND,
1507     vnet_arp_init, 0);
1508 
1509 #ifdef VIMAGE
1510 /*
1511  * We have to unregister ARP along with IP, otherwise we risk doing INADDR_HASH
1512  * lookups after destroying the hash.  Ideally this would go on SI_ORDER_3.5.
1513  */
1514 static void
1515 vnet_arp_destroy(__unused void *arg)
1516 {
1517 
1518 	netisr_unregister_vnet(&arp_nh);
1519 }
1520 VNET_SYSUNINIT(vnet_arp_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD,
1521     vnet_arp_destroy, NULL);
1522 #endif
1523