xref: /freebsd/sys/netinet/if_ether.c (revision 783d3ff6d7fae619db8a7990b8a6387de0c677b5)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1988, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 /*
33  * Ethernet address resolution protocol.
34  * TODO:
35  *	add "inuse/lock" bit (or ref. count) along with valid bit
36  */
37 
38 #include <sys/cdefs.h>
39 #include "opt_inet.h"
40 
41 #include <sys/param.h>
42 #include <sys/eventhandler.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/queue.h>
46 #include <sys/sysctl.h>
47 #include <sys/systm.h>
48 #include <sys/mbuf.h>
49 #include <sys/malloc.h>
50 #include <sys/proc.h>
51 #include <sys/socket.h>
52 #include <sys/syslog.h>
53 
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <net/if_dl.h>
57 #include <net/if_private.h>
58 #include <net/if_types.h>
59 #include <net/netisr.h>
60 #include <net/ethernet.h>
61 #include <net/route.h>
62 #include <net/route/nhop.h>
63 #include <net/vnet.h>
64 
65 #include <netinet/in.h>
66 #include <netinet/in_fib.h>
67 #include <netinet/in_var.h>
68 #include <net/if_llatbl.h>
69 #include <netinet/if_ether.h>
70 #ifdef INET
71 #include <netinet/ip_carp.h>
72 #endif
73 
74 #include <security/mac/mac_framework.h>
75 
76 #define SIN(s) ((const struct sockaddr_in *)(s))
77 
78 static struct timeval arp_lastlog;
79 static int arp_curpps;
80 static int arp_maxpps = 1;
81 
82 /* Simple ARP state machine */
83 enum arp_llinfo_state {
84 	ARP_LLINFO_INCOMPLETE = 0, /* No LLE data */
85 	ARP_LLINFO_REACHABLE,	/* LLE is valid */
86 	ARP_LLINFO_VERIFY,	/* LLE is valid, need refresh */
87 	ARP_LLINFO_DELETED,	/* LLE is deleted */
88 };
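
/*
 * Typical transitions, driven by arptimer() and arp_mark_lle_reachable():
 * INCOMPLETE -> REACHABLE once a reply arrives, REACHABLE -> VERIFY when
 * the timer fires and datapath usage feedback is requested, and
 * VERIFY -> REACHABLE again after a successful refresh.  Entries that are
 * not refreshed in time expire and are freed by arptimer().
 */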
89 
90 SYSCTL_DECL(_net_link_ether);
91 static SYSCTL_NODE(_net_link_ether, PF_INET, inet,
92     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
93     "");
94 static SYSCTL_NODE(_net_link_ether, PF_ARP, arp,
95     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
96     "");
97 
98 /* timer values */
99 VNET_DEFINE_STATIC(int, arpt_keep) = (20*60);	/* once resolved, good for 20
100 						 * minutes */
101 VNET_DEFINE_STATIC(int, arp_maxtries) = 5;
102 VNET_DEFINE_STATIC(int, arp_proxyall) = 0;
103 VNET_DEFINE_STATIC(int, arpt_down) = 20;	/* keep incomplete entries for
104 						 * 20 seconds */
105 VNET_DEFINE_STATIC(int, arpt_rexmit) = 1;	/* retransmit ARP entries, sec */
106 VNET_PCPUSTAT_DEFINE(struct arpstat, arpstat);  /* ARP statistics, see if_arp.h */
107 VNET_PCPUSTAT_SYSINIT(arpstat);
108 
109 #ifdef VIMAGE
110 VNET_PCPUSTAT_SYSUNINIT(arpstat);
111 #endif /* VIMAGE */
112 
113 VNET_DEFINE_STATIC(int, arp_maxhold) = 16;
114 
115 #define	V_arpt_keep		VNET(arpt_keep)
116 #define	V_arpt_down		VNET(arpt_down)
117 #define	V_arpt_rexmit		VNET(arpt_rexmit)
118 #define	V_arp_maxtries		VNET(arp_maxtries)
119 #define	V_arp_proxyall		VNET(arp_proxyall)
120 #define	V_arp_maxhold		VNET(arp_maxhold)
121 
122 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age, CTLFLAG_VNET | CTLFLAG_RW,
123 	&VNET_NAME(arpt_keep), 0,
124 	"ARP entry lifetime in seconds");
125 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries, CTLFLAG_VNET | CTLFLAG_RW,
126 	&VNET_NAME(arp_maxtries), 0,
127 	"ARP resolution attempts before returning error");
128 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall, CTLFLAG_VNET | CTLFLAG_RW,
129 	&VNET_NAME(arp_proxyall), 0,
130 	"Enable proxy ARP for all suitable requests");
131 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, wait, CTLFLAG_VNET | CTLFLAG_RW,
132 	&VNET_NAME(arpt_down), 0,
133 	"Incomplete ARP entry lifetime in seconds");
134 SYSCTL_VNET_PCPUSTAT(_net_link_ether_arp, OID_AUTO, stats, struct arpstat,
135     arpstat, "ARP statistics (struct arpstat, net/if_arp.h)");
136 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxhold, CTLFLAG_VNET | CTLFLAG_RW,
137 	&VNET_NAME(arp_maxhold), 0,
138 	"Number of packets to hold per ARP entry");
139 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_log_per_second,
140 	CTLFLAG_RW, &arp_maxpps, 0,
141 	"Maximum number of remotely triggered ARP messages that can be "
142 	"logged per second");
143 
144 /*
145  * Due to the exponential backoff algorithm used for the interval between GARP
146  * retransmissions, the maximum number of retransmissions is limited for
147  * sanity. This limit corresponds to a maximum interval between retransmissions
148  * of 2^16 seconds ~= 18 hours.
149  *
150  * Making this limit more dynamic is more complicated than worthwhile,
151  * especially since sending out GARPs spaced days apart would be of little
152  * use. A maximum dynamic limit would look something like:
153  *
154  * const int max = fls(INT_MAX / hz) - 1;
155  */
156 #define MAX_GARP_RETRANSMITS 16
157 static int sysctl_garp_rexmit(SYSCTL_HANDLER_ARGS);
158 static int garp_rexmit_count = 0; /* GARP retransmission setting. */
159 
160 SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, garp_rexmit_count,
161     CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE,
162     &garp_rexmit_count, 0, sysctl_garp_rexmit, "I",
163     "Number of times to retransmit GARP packets;"
164     " 0 to disable, maximum of 16");
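
/*
 * Example (illustrative only): with the setting below, a host that has just
 * configured an address sends the initial GARP and then, per
 * garp_timer_start()/garp_rexmit(), three retransmissions roughly 1, 3 and
 * 7 seconds later (backoff intervals of 1, 2 and 4 seconds):
 *
 *	# sysctl net.link.ether.inet.garp_rexmit_count=3
 */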
165 
166 VNET_DEFINE_STATIC(int, arp_log_level) = LOG_INFO;	/* Min. log(9) level. */
167 #define	V_arp_log_level		VNET(arp_log_level)
168 SYSCTL_INT(_net_link_ether_arp, OID_AUTO, log_level, CTLFLAG_VNET | CTLFLAG_RW,
169 	&VNET_NAME(arp_log_level), 0,
170 	"Minimum log(9) level for recording rate-limited ARP log messages. "
171 	"Higher levels log more (emerg=0, info=6 (default), debug=7).");
172 #define	ARP_LOG(pri, ...)	do {					\
173 	if ((pri) <= V_arp_log_level &&					\
174 	    ppsratecheck(&arp_lastlog, &arp_curpps, arp_maxpps))	\
175 		log((pri), "arp: " __VA_ARGS__);			\
176 } while (0)
177 
178 static void	arpintr(struct mbuf *);
179 static void	arptimer(void *);
180 #ifdef INET
181 static void	in_arpinput(struct mbuf *);
182 #endif
183 
184 static void arp_check_update_lle(struct arphdr *ah, struct in_addr isaddr,
185     struct ifnet *ifp, int bridged, struct llentry *la);
186 static void arp_mark_lle_reachable(struct llentry *la, struct ifnet *ifp);
187 static void arp_iflladdr(void *arg __unused, struct ifnet *ifp);
188 
189 static eventhandler_tag iflladdr_tag;
190 
191 static const struct netisr_handler arp_nh = {
192 	.nh_name = "arp",
193 	.nh_handler = arpintr,
194 	.nh_proto = NETISR_ARP,
195 	.nh_policy = NETISR_POLICY_SOURCE,
196 };
197 
198 /*
199  * Timeout routine.  Age arp_tab entries periodically.
200  */
201 static void
202 arptimer(void *arg)
203 {
204 	struct llentry *lle = (struct llentry *)arg;
205 	struct ifnet *ifp;
206 
207 	if (lle->la_flags & LLE_STATIC) {
208 		return;
209 	}
210 	LLE_WLOCK(lle);
211 	if (callout_pending(&lle->lle_timer)) {
212 		/*
213 		 * The treatment of active/pending is a bit odd here.
214 		 * If the pending bit is set, the callout got
215 		 * rescheduled before this handler ran: callout_reset()
216 		 * returned 1 and our reference has already been
217 		 * removed by arpresolve() below, so we just want
218 		 * to bail.
219 		 * The active bit we ignore, since if the callout was
220 		 * stopped in ll_tablefree() while it was currently
221 		 * running, callout_stop() would have returned 0 and
222 		 * that code would not have deleted the entry (the
223 		 * callout could not be stopped), so we want to go
224 		 * through with the delete here now.
226 		 */
227 		LLE_WUNLOCK(lle);
228  		return;
229  	}
230 	ifp = lle->lle_tbl->llt_ifp;
231 	CURVNET_SET(ifp->if_vnet);
232 
233 	switch (lle->ln_state) {
234 	case ARP_LLINFO_REACHABLE:
235 
236 		/*
237 		 * Expiration time is approaching.
238 		 * Request usage feedback from the datapath.
239 		 * Change state and re-schedule ourselves.
240 		 */
241 		llentry_request_feedback(lle);
242 		lle->ln_state = ARP_LLINFO_VERIFY;
243 		callout_schedule(&lle->lle_timer, hz * V_arpt_rexmit);
244 		LLE_WUNLOCK(lle);
245 		CURVNET_RESTORE();
246 		return;
247 	case ARP_LLINFO_VERIFY:
248 		if (llentry_get_hittime(lle) > 0 && lle->la_preempt > 0) {
249 			/* Entry was used, issue refresh request */
250 			struct epoch_tracker et;
251 			struct in_addr dst;
252 
253 			dst = lle->r_l3addr.addr4;
254 			lle->la_preempt--;
255 			callout_schedule(&lle->lle_timer, hz * V_arpt_rexmit);
256 			LLE_WUNLOCK(lle);
257 			NET_EPOCH_ENTER(et);
258 			arprequest(ifp, NULL, &dst, NULL);
259 			NET_EPOCH_EXIT(et);
260 			CURVNET_RESTORE();
261 			return;
262 		}
263 		/* Nothing happened. Reschedule if not too late */
264 		if (lle->la_expire > time_uptime) {
265 			callout_schedule(&lle->lle_timer, hz * V_arpt_rexmit);
266 			LLE_WUNLOCK(lle);
267 			CURVNET_RESTORE();
268 			return;
269 		}
270 		break;
271 	case ARP_LLINFO_INCOMPLETE:
272 	case ARP_LLINFO_DELETED:
273 		break;
274 	}
275 
276 	if ((lle->la_flags & LLE_DELETED) == 0) {
277 		int evt;
278 
279 		if (lle->la_flags & LLE_VALID)
280 			evt = LLENTRY_EXPIRED;
281 		else
282 			evt = LLENTRY_TIMEDOUT;
283 		EVENTHANDLER_INVOKE(lle_event, lle, evt);
284 	}
285 
286 	callout_stop(&lle->lle_timer);
287 
288 	/* XXX: LOR avoidance. We still have ref on lle. */
289 	LLE_WUNLOCK(lle);
290 	IF_AFDATA_LOCK(ifp);
291 	LLE_WLOCK(lle);
292 
293 	/* Guard against race with other llentry_free(). */
294 	if (lle->la_flags & LLE_LINKED) {
295 		LLE_REMREF(lle);
296 		lltable_unlink_entry(lle->lle_tbl, lle);
297 	}
298 	IF_AFDATA_UNLOCK(ifp);
299 
300 	size_t pkts_dropped = llentry_free(lle);
301 
302 	ARPSTAT_ADD(dropped, pkts_dropped);
303 	ARPSTAT_INC(timeouts);
304 
305 	CURVNET_RESTORE();
306 }
307 
308 /*
309  * Stores link-layer header for @ifp in format suitable for if_output()
310  * into buffer @buf. Resulting header length is stored in @bufsize.
311  *
312  * Returns 0 on success.
313  */
314 static int
315 arp_fillheader(struct ifnet *ifp, struct arphdr *ah, int bcast, u_char *buf,
316     size_t *bufsize)
317 {
318 	struct if_encap_req ereq;
319 	int error;
320 
321 	bzero(buf, *bufsize);
322 	bzero(&ereq, sizeof(ereq));
323 	ereq.buf = buf;
324 	ereq.bufsize = *bufsize;
325 	ereq.rtype = IFENCAP_LL;
326 	ereq.family = AF_ARP;
327 	ereq.lladdr = ar_tha(ah);
328 	ereq.hdata = (u_char *)ah;
329 	if (bcast)
330 		ereq.flags = IFENCAP_FLAG_BROADCAST;
331 	error = ifp->if_requestencap(ifp, &ereq);
332 	if (error == 0)
333 		*bufsize = ereq.bufsize;
334 
335 	return (error);
336 }
337 
338 /*
339  * Broadcast an ARP request. Caller specifies:
340  *	- arp header source ip address
341  *	- arp header target ip address
342  *	- arp header source ethernet address
343  */
344 static int
345 arprequest_internal(struct ifnet *ifp, const struct in_addr *sip,
346     const struct in_addr *tip, u_char *enaddr)
347 {
348 	struct mbuf *m;
349 	struct arphdr *ah;
350 	struct sockaddr sa;
351 	u_char *carpaddr = NULL;
352 	uint8_t linkhdr[LLE_MAX_LINKHDR];
353 	size_t linkhdrsize;
354 	struct route ro;
355 	int error;
356 
357 	NET_EPOCH_ASSERT();
358 
359 	if (sip == NULL) {
360 		/*
361 		 * The caller did not supply a source address, try to find
362 		 * a compatible one among those assigned to this interface.
363 		 */
364 		struct ifaddr *ifa;
365 
366 		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
367 			if (ifa->ifa_addr->sa_family != AF_INET)
368 				continue;
369 
370 			if (ifa->ifa_carp) {
371 				if ((*carp_iamatch_p)(ifa, &carpaddr) == 0)
372 					continue;
373 				sip = &IA_SIN(ifa)->sin_addr;
374 			} else {
375 				carpaddr = NULL;
376 				sip = &IA_SIN(ifa)->sin_addr;
377 			}
378 
379 			if (0 == ((sip->s_addr ^ tip->s_addr) &
380 			    IA_MASKSIN(ifa)->sin_addr.s_addr))
381 				break;  /* found it. */
382 		}
383 		if (sip == NULL) {
384 			printf("%s: cannot find matching address\n", __func__);
385 			return (EADDRNOTAVAIL);
386 		}
387 	}
388 	if (enaddr == NULL)
389 		enaddr = carpaddr ? carpaddr : (u_char *)IF_LLADDR(ifp);
390 
391 	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
392 		return (ENOMEM);
393 	m->m_len = sizeof(*ah) + 2 * sizeof(struct in_addr) +
394 		2 * ifp->if_addrlen;
395 	m->m_pkthdr.len = m->m_len;
396 	M_ALIGN(m, m->m_len);
397 	ah = mtod(m, struct arphdr *);
398 	bzero((caddr_t)ah, m->m_len);
399 #ifdef MAC
400 	mac_netinet_arp_send(ifp, m);
401 #endif
402 	ah->ar_pro = htons(ETHERTYPE_IP);
403 	ah->ar_hln = ifp->if_addrlen;		/* hardware address length */
404 	ah->ar_pln = sizeof(struct in_addr);	/* protocol address length */
405 	ah->ar_op = htons(ARPOP_REQUEST);
406 	bcopy(enaddr, ar_sha(ah), ah->ar_hln);
407 	bcopy(sip, ar_spa(ah), ah->ar_pln);
408 	bcopy(tip, ar_tpa(ah), ah->ar_pln);
409 	sa.sa_family = AF_ARP;
410 	sa.sa_len = 2;
411 
412 	/* Calculate link header for sending frame */
413 	bzero(&ro, sizeof(ro));
414 	linkhdrsize = sizeof(linkhdr);
415 	error = arp_fillheader(ifp, ah, 1, linkhdr, &linkhdrsize);
416 	if (error != 0 && error != EAFNOSUPPORT) {
417 		m_freem(m);
418 		ARP_LOG(LOG_ERR, "Failed to calculate ARP header on %s: %d\n",
419 		    if_name(ifp), error);
420 		return (error);
421 	}
422 
423 	ro.ro_prepend = linkhdr;
424 	ro.ro_plen = linkhdrsize;
425 	ro.ro_flags = 0;
426 
427 	m->m_flags |= M_BCAST;
428 	m_clrprotoflags(m);	/* Avoid confusing lower layers. */
429 	error = (*ifp->if_output)(ifp, m, &sa, &ro);
430 	ARPSTAT_INC(txrequests);
431 	if (error) {
432 		ARPSTAT_INC(txerrors);
433 		ARP_LOG(LOG_DEBUG, "Failed to send ARP packet on %s: %d\n",
434 		    if_name(ifp), error);
435 	}
436 	return (error);
437 }
438 
439 void
440 arprequest(struct ifnet *ifp, const struct in_addr *sip,
441     const struct in_addr *tip, u_char *enaddr)
442 {
443 
444 	(void) arprequest_internal(ifp, sip, tip, enaddr);
445 }
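
/*
 * Note on usage: passing sip == NULL lets arprequest_internal() pick a
 * compatible source address from @ifp, while passing sip == tip produces a
 * gratuitous ARP announcement (see arp_announce_ifaddr() below).
 */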
446 
447 /*
448  * Resolve an IP address into an ethernet address - heavy version.
449  * Used internally by arpresolve().
450  * We have already checked that we can't use an existing lle without
451  * modification so we have to acquire an LLE_EXCLUSIVE lle lock.
452  *
453  * On success, desten and pflags are filled in and the function returns 0.
454  * If the packet must be held pending resolution, we return EWOULDBLOCK.
455  * On other errors, we return the corresponding error code.
456  * Note that m_freem() handles NULL.
457  */
458 static int
459 arpresolve_full(struct ifnet *ifp, int is_gw, int flags, struct mbuf *m,
460 	const struct sockaddr *dst, u_char *desten, uint32_t *pflags,
461 	struct llentry **plle)
462 {
463 	struct llentry *la = NULL, *la_tmp;
464 	int error, renew;
465 	char *lladdr;
466 	int ll_len;
467 
468 	NET_EPOCH_ASSERT();
469 
470 	if (pflags != NULL)
471 		*pflags = 0;
472 	if (plle != NULL)
473 		*plle = NULL;
474 
475 	if ((flags & LLE_CREATE) == 0)
476 		la = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
477 	if (la == NULL && (ifp->if_flags & (IFF_NOARP | IFF_STATICARP)) == 0) {
478 		la = lltable_alloc_entry(LLTABLE(ifp), 0, dst);
479 		if (la == NULL) {
480 			char addrbuf[INET_ADDRSTRLEN];
481 
482 			log(LOG_DEBUG,
483 			    "arpresolve: can't allocate llinfo for %s on %s\n",
484 			    inet_ntoa_r(SIN(dst)->sin_addr, addrbuf),
485 			    if_name(ifp));
486 			m_freem(m);
487 			return (EINVAL);
488 		}
489 
490 		IF_AFDATA_WLOCK(ifp);
491 		LLE_WLOCK(la);
492 		la_tmp = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
493 		/* Prefer ANY existing lle over newly-created one */
494 		if (la_tmp == NULL)
495 			lltable_link_entry(LLTABLE(ifp), la);
496 		IF_AFDATA_WUNLOCK(ifp);
497 		if (la_tmp != NULL) {
498 			lltable_free_entry(LLTABLE(ifp), la);
499 			la = la_tmp;
500 		}
501 	}
502 	if (la == NULL) {
503 		m_freem(m);
504 		return (EINVAL);
505 	}
506 
507 	if ((la->la_flags & LLE_VALID) &&
508 	    ((la->la_flags & LLE_STATIC) || la->la_expire > time_uptime)) {
509 		if (flags & LLE_ADDRONLY) {
510 			lladdr = la->ll_addr;
511 			ll_len = ifp->if_addrlen;
512 		} else {
513 			lladdr = la->r_linkdata;
514 			ll_len = la->r_hdrlen;
515 		}
516 		bcopy(lladdr, desten, ll_len);
517 
518 		/* Notify LLE code that the entry was used by datapath */
519 		llentry_provide_feedback(la);
520 		if (pflags != NULL)
521 			*pflags = la->la_flags & (LLE_VALID|LLE_IFADDR);
522 		if (plle) {
523 			LLE_ADDREF(la);
524 			*plle = la;
525 		}
526 		LLE_WUNLOCK(la);
527 		return (0);
528 	}
529 
530 	renew = (la->la_asked == 0 || la->la_expire != time_uptime);
531 
532 	/*
533 	 * There is an arptab entry, but no ethernet address
534 	 * response yet.  Add the mbuf to the list, dropping
535 	 * the oldest packet if we have exceeded the system
536 	 * setting.
537 	 */
538 	if (m != NULL) {
539 		size_t dropped = lltable_append_entry_queue(la, m, V_arp_maxhold);
540 		ARPSTAT_ADD(dropped, dropped);
541 	}
542 
543 	/*
544 	 * Return EWOULDBLOCK if we have tried less than arp_maxtries. It
545 	 * will be masked by ether_output(). Return EHOSTDOWN/EHOSTUNREACH
546 	 * if we have already sent arp_maxtries ARP requests. Retransmit the
547 	 * ARP request, but not faster than one request per second.
548 	 */
549 	if (la->la_asked < V_arp_maxtries)
550 		error = EWOULDBLOCK;	/* First request. */
551 	else
552 		error = is_gw != 0 ? EHOSTUNREACH : EHOSTDOWN;
553 
554 	if (renew) {
555 		int canceled, e;
556 
557 		LLE_ADDREF(la);
558 		la->la_expire = time_uptime;
559 		canceled = callout_reset(&la->lle_timer, hz * V_arpt_down,
560 		    arptimer, la);
561 		if (canceled)
562 			LLE_REMREF(la);
563 		la->la_asked++;
564 		LLE_WUNLOCK(la);
565 		e = arprequest_internal(ifp, NULL, &SIN(dst)->sin_addr, NULL);
566 		/*
567 		 * Only overwrite 'error' in case of error; in case of success
568 		 * the proper return value was already set above.
569 		 */
570 		if (e != 0)
571 			return (e);
572 		return (error);
573 	}
574 
575 	LLE_WUNLOCK(la);
576 	return (error);
577 }
578 
579 /*
580  * Looks up the link-layer header based on an IP address.
581  * On input:
582  *    ifp is the interface we use
583  *    is_gw != 0 if @dst represents a gateway to some destination
584  *    m is the mbuf. May be NULL if we don't have a packet.
585  *    dst is the next hop.
586  *    desten is the storage to put the LL header.
587  *    pflags returns a subset of lle flags: LLE_VALID | LLE_IFADDR
588  *
589  * On success, full/partial link header and flags are filled in and
590  * the function returns 0.
591  * If the packet must be held pending resolution, we return EWOULDBLOCK.
592  * On other errors, we return the corresponding error code.
593  * Note that m_freem() handles NULL.
594  */
595 int
596 arpresolve(struct ifnet *ifp, int is_gw, struct mbuf *m,
597 	const struct sockaddr *dst, u_char *desten, uint32_t *pflags,
598 	struct llentry **plle)
599 {
600 	struct llentry *la = NULL;
601 
602 	NET_EPOCH_ASSERT();
603 
604 	if (pflags != NULL)
605 		*pflags = 0;
606 	if (plle != NULL)
607 		*plle = NULL;
608 
609 	if (m != NULL) {
610 		if (m->m_flags & M_BCAST) {
611 			/* broadcast */
612 			(void)memcpy(desten,
613 			    ifp->if_broadcastaddr, ifp->if_addrlen);
614 			return (0);
615 		}
616 		if (m->m_flags & M_MCAST) {
617 			/* multicast */
618 			ETHER_MAP_IP_MULTICAST(&SIN(dst)->sin_addr, desten);
619 			return (0);
620 		}
621 	}
622 
623 	la = lla_lookup(LLTABLE(ifp), plle ? LLE_EXCLUSIVE : LLE_UNLOCKED, dst);
624 	if (la != NULL && (la->r_flags & RLLE_VALID) != 0) {
625 		/* Entry found, let's copy lle info */
626 		bcopy(la->r_linkdata, desten, la->r_hdrlen);
627 		if (pflags != NULL)
628 			*pflags = LLE_VALID | (la->r_flags & RLLE_IFADDR);
629 		/* Notify the LLE handling code that the entry was used. */
630 		llentry_provide_feedback(la);
631 		if (plle) {
632 			LLE_ADDREF(la);
633 			*plle = la;
634 			LLE_WUNLOCK(la);
635 		}
636 		return (0);
637 	}
638 	if (plle && la)
639 		LLE_WUNLOCK(la);
640 
641 	return (arpresolve_full(ifp, is_gw, la == NULL ? LLE_CREATE : 0, m, dst,
642 	    desten, pflags, plle));
643 }
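
/*
 * Sketch of how a typical caller consumes the contract above; compare
 * ether_output(), which masks EWOULDBLOCK (the mbuf has been queued on the
 * lle and will be sent once resolution completes).  Identifiers below are
 * illustrative only:
 *
 *	error = arpresolve(ifp, is_gw, m, dst, edst, &pflags, NULL);
 *	if (error == EWOULDBLOCK)
 *		return (0);
 *	if (error != 0)
 *		return (error);
 */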
644 
645 /*
646  * Common length and type checks are done here,
647  * then the protocol-specific routine is called.
648  */
649 static void
650 arpintr(struct mbuf *m)
651 {
652 	struct arphdr *ar;
653 	struct ifnet *ifp;
654 	char *layer;
655 	int hlen;
656 
657 	ifp = m->m_pkthdr.rcvif;
658 
659 	if (m->m_len < sizeof(struct arphdr) &&
660 	    ((m = m_pullup(m, sizeof(struct arphdr))) == NULL)) {
661 		ARP_LOG(LOG_NOTICE, "packet with short header received on %s\n",
662 		    if_name(ifp));
663 		return;
664 	}
665 	ar = mtod(m, struct arphdr *);
666 
667 	/* Check if length is sufficient */
668 	if (m->m_len <  arphdr_len(ar)) {
669 		m = m_pullup(m, arphdr_len(ar));
670 		if (m == NULL) {
671 			ARP_LOG(LOG_NOTICE, "short packet received on %s\n",
672 			    if_name(ifp));
673 			return;
674 		}
675 		ar = mtod(m, struct arphdr *);
676 	}
677 
678 	hlen = 0;
679 	layer = "";
680 	switch (ntohs(ar->ar_hrd)) {
681 	case ARPHRD_ETHER:
682 		hlen = ETHER_ADDR_LEN; /* RFC 826 */
683 		layer = "ethernet";
684 		break;
685 	case ARPHRD_INFINIBAND:
686 		hlen = 20;	/* RFC 4391, INFINIBAND_ALEN */
687 		layer = "infiniband";
688 		break;
689 	case ARPHRD_IEEE1394:
690 		hlen = 0; /* SHALL be 16 */ /* RFC 2734 */
691 		layer = "firewire";
692 
693 		/*
694 		 * Restrict overly long hardware addresses.
695 		 * Currently we are capable of handling 20-byte
696 		 * addresses (sizeof(lle->ll_addr)).
697 		 */
698 		if (ar->ar_hln >= 20)
699 			hlen = 16;
700 		break;
701 	default:
702 		ARP_LOG(LOG_NOTICE,
703 		    "packet with unknown hardware format 0x%02x received on "
704 		    "%s\n", ntohs(ar->ar_hrd), if_name(ifp));
705 		m_freem(m);
706 		return;
707 	}
708 
709 	if (hlen != 0 && hlen != ar->ar_hln) {
710 		ARP_LOG(LOG_NOTICE,
711 		    "packet with invalid %s address length %d received on %s\n",
712 		    layer, ar->ar_hln, if_name(ifp));
713 		m_freem(m);
714 		return;
715 	}
716 
717 	ARPSTAT_INC(received);
718 	switch (ntohs(ar->ar_pro)) {
719 #ifdef INET
720 	case ETHERTYPE_IP:
721 		in_arpinput(m);
722 		return;
723 #endif
724 	}
725 	m_freem(m);
726 }
727 
728 #ifdef INET
729 /*
730  * ARP for Internet protocols on 10 Mb/s Ethernet.
731  * Algorithm is that given in RFC 826.
732  * In addition, a sanity check is performed on the sender
733  * protocol address, to catch impersonators.
734  * We no longer handle negotiations for use of trailer protocol:
735  * Formerly, ARP replied for protocol type ETHERTYPE_TRAIL sent
736  * along with IP replies if we wanted trailers sent to us,
737  * and also sent them in response to IP replies.
738  * This allowed either end to announce the desire to receive
739  * trailer packets.
740  * We no longer reply to requests for ETHERTYPE_TRAIL protocol either,
741  * but formerly didn't normally send requests.
742  */
743 static int log_arp_wrong_iface = 1;
744 static int log_arp_movements = 1;
745 static int log_arp_permanent_modify = 1;
746 static int allow_multicast = 0;
747 
748 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_wrong_iface, CTLFLAG_RW,
749 	&log_arp_wrong_iface, 0,
750 	"log arp packets arriving on the wrong interface");
751 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_movements, CTLFLAG_RW,
752 	&log_arp_movements, 0,
753 	"log arp replies from MACs different than the one in the cache");
754 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_permanent_modify, CTLFLAG_RW,
755 	&log_arp_permanent_modify, 0,
756 	"log arp replies from MACs different than the one in the permanent arp entry");
757 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, allow_multicast, CTLFLAG_RW,
758 	&allow_multicast, 0, "accept multicast addresses");
759 
760 static void
761 in_arpinput(struct mbuf *m)
762 {
763 	struct arphdr *ah;
764 	struct ifnet *ifp = m->m_pkthdr.rcvif;
765 	struct llentry *la = NULL, *la_tmp;
766 	struct ifaddr *ifa;
767 	struct in_ifaddr *ia;
768 	struct sockaddr sa;
769 	struct in_addr isaddr, itaddr, myaddr;
770 	u_int8_t *enaddr = NULL;
771 	int op;
772 	int bridged = 0, is_bridge = 0;
773 	int carped;
774 	struct sockaddr_in sin;
775 	struct sockaddr *dst;
776 	struct nhop_object *nh;
777 	uint8_t linkhdr[LLE_MAX_LINKHDR];
778 	struct route ro;
779 	size_t linkhdrsize;
780 	int lladdr_off;
781 	int error;
782 	char addrbuf[INET_ADDRSTRLEN];
783 
784 	NET_EPOCH_ASSERT();
785 
786 	sin.sin_len = sizeof(struct sockaddr_in);
787 	sin.sin_family = AF_INET;
788 	sin.sin_addr.s_addr = 0;
789 
790 	if (ifp->if_bridge)
791 		bridged = 1;
792 	if (ifp->if_type == IFT_BRIDGE)
793 		is_bridge = 1;
794 
795 	/*
796 	 * We have already checked that the mbuf contains enough contiguous
797 	 * data to hold the entire ARP message, according to the ARP header.
798 	 */
799 	ah = mtod(m, struct arphdr *);
800 
801 	/*
802 	 * ARP is only for IPv4 so we can reject packets with
803 	 * a protocol length not equal to an IPv4 address.
804 	 */
805 	if (ah->ar_pln != sizeof(struct in_addr)) {
806 		ARP_LOG(LOG_NOTICE, "requested protocol length != %zu\n",
807 		    sizeof(struct in_addr));
808 		goto drop;
809 	}
810 
811 	if (allow_multicast == 0 && ETHER_IS_MULTICAST(ar_sha(ah))) {
812 		ARP_LOG(LOG_NOTICE, "%*D is multicast\n",
813 		    ifp->if_addrlen, (u_char *)ar_sha(ah), ":");
814 		goto drop;
815 	}
816 
817 	op = ntohs(ah->ar_op);
818 	(void)memcpy(&isaddr, ar_spa(ah), sizeof (isaddr));
819 	(void)memcpy(&itaddr, ar_tpa(ah), sizeof (itaddr));
820 
821 	if (op == ARPOP_REPLY)
822 		ARPSTAT_INC(rxreplies);
823 
824 	/*
825 	 * For a bridge, we want to check the address irrespective
826 	 * of the receive interface. (This will change slightly
827 	 * when we have clusters of interfaces).
828 	 */
829 	CK_LIST_FOREACH(ia, INADDR_HASH(itaddr.s_addr), ia_hash) {
830 		if (((bridged && ia->ia_ifp->if_bridge == ifp->if_bridge) ||
831 		    ia->ia_ifp == ifp) &&
832 		    itaddr.s_addr == ia->ia_addr.sin_addr.s_addr &&
833 		    (ia->ia_ifa.ifa_carp == NULL ||
834 		    (*carp_iamatch_p)(&ia->ia_ifa, &enaddr))) {
835 			ifa_ref(&ia->ia_ifa);
836 			goto match;
837 		}
838 	}
839 	CK_LIST_FOREACH(ia, INADDR_HASH(isaddr.s_addr), ia_hash)
840 		if (((bridged && ia->ia_ifp->if_bridge == ifp->if_bridge) ||
841 		    ia->ia_ifp == ifp) &&
842 		    isaddr.s_addr == ia->ia_addr.sin_addr.s_addr) {
843 			ifa_ref(&ia->ia_ifa);
844 			goto match;
845 		}
846 
847 #define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia)				\
848   (ia->ia_ifp->if_bridge == ifp->if_softc &&				\
849   !bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) &&	\
850   addr == ia->ia_addr.sin_addr.s_addr)
851 	/*
852 	 * Check the case when the bridge shares its MAC address with
853 	 * some of its children, so packets are claimed by the bridge
854 	 * itself (bridge_input() does it first), but they are really
855 	 * meant to be destined for the bridge member.
856 	 */
857 	if (is_bridge) {
858 		CK_LIST_FOREACH(ia, INADDR_HASH(itaddr.s_addr), ia_hash) {
859 			if (BDG_MEMBER_MATCHES_ARP(itaddr.s_addr, ifp, ia)) {
860 				ifa_ref(&ia->ia_ifa);
861 				ifp = ia->ia_ifp;
862 				goto match;
863 			}
864 		}
865 	}
866 #undef BDG_MEMBER_MATCHES_ARP
867 
868 	/*
869 	 * No match, use the first inet address on the receive interface
870 	 * as a dummy address for the rest of the function.
871 	 */
872 	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
873 		if (ifa->ifa_addr->sa_family == AF_INET &&
874 		    (ifa->ifa_carp == NULL ||
875 		    (*carp_iamatch_p)(ifa, &enaddr))) {
876 			ia = ifatoia(ifa);
877 			ifa_ref(ifa);
878 			goto match;
879 		}
880 
881 	/*
882 	 * If bridging, fall back to using any inet address.
883 	 */
884 	if (!bridged || (ia = CK_STAILQ_FIRST(&V_in_ifaddrhead)) == NULL)
885 		goto drop;
886 	ifa_ref(&ia->ia_ifa);
887 match:
888 	if (!enaddr)
889 		enaddr = (u_int8_t *)IF_LLADDR(ifp);
890 	carped = (ia->ia_ifa.ifa_carp != NULL);
891 	myaddr = ia->ia_addr.sin_addr;
892 	ifa_free(&ia->ia_ifa);
893 	if (!bcmp(ar_sha(ah), enaddr, ifp->if_addrlen))
894 		goto drop;	/* it's from me, ignore it. */
895 	if (!bcmp(ar_sha(ah), ifp->if_broadcastaddr, ifp->if_addrlen)) {
896 		ARP_LOG(LOG_NOTICE, "link address is broadcast for IP address "
897 		    "%s!\n", inet_ntoa_r(isaddr, addrbuf));
898 		goto drop;
899 	}
900 
901 	if (ifp->if_addrlen != ah->ar_hln) {
902 		ARP_LOG(LOG_WARNING, "from %*D: addr len: new %d, "
903 		    "i/f %d (ignored)\n", ifp->if_addrlen,
904 		    (u_char *) ar_sha(ah), ":", ah->ar_hln,
905 		    ifp->if_addrlen);
906 		goto drop;
907 	}
908 
909 	/*
910 	 * Warn if another host is using the same IP address, but only if the
911 	 * IP address isn't 0.0.0.0, which is used for DHCP only, in which
912 	 * case we suppress the warning to avoid false positive complaints of
913 	 * potential misconfiguration.
914 	 */
915 	if (!bridged && !carped && isaddr.s_addr == myaddr.s_addr &&
916 	    myaddr.s_addr != 0) {
917 		ARP_LOG(LOG_ERR, "%*D is using my IP address %s on %s!\n",
918 		   ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
919 		   inet_ntoa_r(isaddr, addrbuf), ifp->if_xname);
920 		itaddr = myaddr;
921 		ARPSTAT_INC(dupips);
922 		goto reply;
923 	}
924 	if (ifp->if_flags & IFF_STATICARP)
925 		goto reply;
926 
927 	bzero(&sin, sizeof(sin));
928 	sin.sin_len = sizeof(struct sockaddr_in);
929 	sin.sin_family = AF_INET;
930 	sin.sin_addr = isaddr;
931 	dst = (struct sockaddr *)&sin;
932 	la = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
933 	if (la != NULL)
934 		arp_check_update_lle(ah, isaddr, ifp, bridged, la);
935 	else if (itaddr.s_addr == myaddr.s_addr) {
936 		/*
937 		 * Request/reply to our address, but no lle exists yet.
938 		 * Calculate full link prepend to use in lle.
939 		 */
940 		linkhdrsize = sizeof(linkhdr);
941 		if (lltable_calc_llheader(ifp, AF_INET, ar_sha(ah), linkhdr,
942 		    &linkhdrsize, &lladdr_off) != 0)
943 			goto reply;
944 
945 		/* Allocate new entry */
946 		la = lltable_alloc_entry(LLTABLE(ifp), 0, dst);
947 		if (la == NULL) {
948 			/*
949 			 * lle creation may fail if the source address
950 			 * belongs to a non-directly connected subnet.
951 			 * However, we will try to answer the request
952 			 * instead of dropping the frame.
953 			 */
954 			goto reply;
955 		}
956 		lltable_set_entry_addr(ifp, la, linkhdr, linkhdrsize,
957 		    lladdr_off);
958 
959 		IF_AFDATA_WLOCK(ifp);
960 		LLE_WLOCK(la);
961 		la_tmp = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
962 
963 		/*
964 		 * Check if the lle still does not exist.
965 		 * If it does, that means that we either
966 		 * 1) have configured it explicitly, via an 'arp -s'
967 		 * static entry or an interface address static record,
968 		 * or
969 		 * 2) it was created as the result of sending the
970 		 * first packet to this host,
971 		 * or
972 		 * 3) it was another ARP reply packet that we handled
973 		 * in a different thread.
974 		 *
975 		 * In all cases except 3) we definitely need to prefer
976 		 * the existing lle. For the sake of simplicity, prefer
977 		 * any existing lle over the newly-created one.
978 		 */
979 		if (la_tmp == NULL)
980 			lltable_link_entry(LLTABLE(ifp), la);
981 		IF_AFDATA_WUNLOCK(ifp);
982 
983 		if (la_tmp == NULL) {
984 			arp_mark_lle_reachable(la, ifp);
985 			LLE_WUNLOCK(la);
986 		} else {
987 			/* Free the newly-created entry and handle the packet */
988 			lltable_free_entry(LLTABLE(ifp), la);
989 			la = la_tmp;
990 			la_tmp = NULL;
991 			arp_check_update_lle(ah, isaddr, ifp, bridged, la);
992 			/* arp_check_update_lle() returns @la unlocked */
993 		}
994 		la = NULL;
995 	}
996 reply:
997 	if (op != ARPOP_REQUEST)
998 		goto drop;
999 	ARPSTAT_INC(rxrequests);
1000 
1001 	if (itaddr.s_addr == myaddr.s_addr) {
1002 		/* Shortcut: the receiving interface is the target. */
1003 		(void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
1004 		(void)memcpy(ar_sha(ah), enaddr, ah->ar_hln);
1005 	} else {
1006 		/*
1007 		 * Destination address is not ours. Check if
1008 		 * proxyarp entry exists or proxyarp is turned on globally.
1009 		 */
1010 		struct llentry *lle;
1011 
1012 		sin.sin_addr = itaddr;
1013 		lle = lla_lookup(LLTABLE(ifp), 0, (struct sockaddr *)&sin);
1014 
1015 		if ((lle != NULL) && (lle->la_flags & LLE_PUB)) {
1016 			(void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
1017 			(void)memcpy(ar_sha(ah), lle->ll_addr, ah->ar_hln);
1018 			LLE_RUNLOCK(lle);
1019 		} else {
1020 			if (lle != NULL)
1021 				LLE_RUNLOCK(lle);
1022 
1023 			if (!V_arp_proxyall)
1024 				goto drop;
1025 
1026 			NET_EPOCH_ASSERT();
1027 			nh = fib4_lookup(ifp->if_fib, itaddr, 0, 0, 0);
1028 			if (nh == NULL)
1029 				goto drop;
1030 
1031 			/*
1032 			 * Don't send proxies for nodes on the same interface
1033 			 * as this one came out of, or we'll get into a fight
1034 			 * over who claims what Ether address.
1035 			 */
1036 			if (nh->nh_ifp == ifp)
1037 				goto drop;
1038 
1039 			(void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
1040 			(void)memcpy(ar_sha(ah), enaddr, ah->ar_hln);
1041 
1042 			/*
1043 			 * Also check that the node which sent the ARP packet
1044 			 * is on the interface we expect it to be on. This
1045 			 * avoids ARP chaos if an interface is connected to the
1046 			 * wrong network.
1047 			 */
1048 
1049 			nh = fib4_lookup(ifp->if_fib, isaddr, 0, 0, 0);
1050 			if (nh == NULL)
1051 				goto drop;
1052 			if (nh->nh_ifp != ifp) {
1053 				ARP_LOG(LOG_INFO, "proxy: ignoring request"
1054 				    " from %s via %s\n",
1055 				    inet_ntoa_r(isaddr, addrbuf),
1056 				    ifp->if_xname);
1057 				goto drop;
1058 			}
1059 
1060 #ifdef DEBUG_PROXY
1061 			printf("arp: proxying for %s\n",
1062 			    inet_ntoa_r(itaddr, addrbuf));
1063 #endif
1064 		}
1065 	}
1066 
1067 	if (itaddr.s_addr == myaddr.s_addr &&
1068 	    IN_LINKLOCAL(ntohl(itaddr.s_addr))) {
1069 		/* RFC 3927 link-local IPv4; always reply by broadcast. */
1070 #ifdef DEBUG_LINKLOCAL
1071 		printf("arp: sending reply for link-local addr %s\n",
1072 		    inet_ntoa_r(itaddr, addrbuf));
1073 #endif
1074 		m->m_flags |= M_BCAST;
1075 		m->m_flags &= ~M_MCAST;
1076 	} else {
1077 		/* default behaviour; never reply by broadcast. */
1078 		m->m_flags &= ~(M_BCAST|M_MCAST);
1079 	}
1080 	(void)memcpy(ar_tpa(ah), ar_spa(ah), ah->ar_pln);
1081 	(void)memcpy(ar_spa(ah), &itaddr, ah->ar_pln);
1082 	ah->ar_op = htons(ARPOP_REPLY);
1083 	ah->ar_pro = htons(ETHERTYPE_IP); /* let's be sure! */
1084 	m->m_len = sizeof(*ah) + (2 * ah->ar_pln) + (2 * ah->ar_hln);
1085 	m->m_pkthdr.len = m->m_len;
1086 	m->m_pkthdr.rcvif = NULL;
1087 	sa.sa_family = AF_ARP;
1088 	sa.sa_len = 2;
1089 
1090 	/* Calculate link header for sending frame */
1091 	bzero(&ro, sizeof(ro));
1092 	linkhdrsize = sizeof(linkhdr);
1093 	error = arp_fillheader(ifp, ah, 0, linkhdr, &linkhdrsize);
1094 
1095 	/*
1096 	 * arp_fillheader() may fail due to lack of support in the interface's
1097 	 * encap request routine. This is not necessarily an error; AF_ARP
1098 	 * can/should be handled by if_output().
1099 	 */
1100 	if (error != 0 && error != EAFNOSUPPORT) {
1101 		ARP_LOG(LOG_ERR, "Failed to calculate ARP header on %s: %d\n",
1102 		    if_name(ifp), error);
1103 		goto drop;
1104 	}
1105 
1106 	ro.ro_prepend = linkhdr;
1107 	ro.ro_plen = linkhdrsize;
1108 	ro.ro_flags = 0;
1109 
1110 	m_clrprotoflags(m);	/* Avoid confusing lower layers. */
1111 	(*ifp->if_output)(ifp, m, &sa, &ro);
1112 	ARPSTAT_INC(txreplies);
1113 	return;
1114 
1115 drop:
1116 	m_freem(m);
1117 }
1118 #endif
1119 
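/*
 * Detach the chain of mbufs that were queued on @la while resolution was
 * pending and hand it to the caller, clearing la_hold and la_numheld.
 */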
1120 static struct mbuf *
1121 arp_grab_holdchain(struct llentry *la)
1122 {
1123 	struct mbuf *chain;
1124 
1125 	LLE_WLOCK_ASSERT(la);
1126 
1127 	chain = la->la_hold;
1128 	la->la_hold = NULL;
1129 	la->la_numheld = 0;
1130 
1131 	return (chain);
1132 }
1133 
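/*
 * Transmit every packet in @chain (previously held on @la) now that the
 * entry is resolved, using the link-layer prepend data cached in the lle.
 */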
1134 static void
1135 arp_flush_holdchain(struct ifnet *ifp, struct llentry *la, struct mbuf *chain)
1136 {
1137 	struct mbuf *m_hold, *m_hold_next;
1138 	struct sockaddr_in sin;
1139 
1140 	NET_EPOCH_ASSERT();
1141 
1142 	struct route ro = {
1143 		.ro_prepend = la->r_linkdata,
1144 		.ro_plen = la->r_hdrlen,
1145 	};
1146 
1147 	lltable_fill_sa_entry(la, (struct sockaddr *)&sin);
1148 
1149 	for (m_hold = chain; m_hold != NULL; m_hold = m_hold_next) {
1150 		m_hold_next = m_hold->m_nextpkt;
1151 		m_hold->m_nextpkt = NULL;
1152 		/* Avoid confusing lower layers. */
1153 		m_clrprotoflags(m_hold);
1154 		(*ifp->if_output)(ifp, m_hold, (struct sockaddr *)&sin, &ro);
1155 	}
1156 }
1157 
1158 /*
1159  * Checks received arp data against existing @la.
1160  * Updates lle state/performs notification if necessary.
1161  */
1162 static void
1163 arp_check_update_lle(struct arphdr *ah, struct in_addr isaddr, struct ifnet *ifp,
1164     int bridged, struct llentry *la)
1165 {
1166 	uint8_t linkhdr[LLE_MAX_LINKHDR];
1167 	size_t linkhdrsize;
1168 	int lladdr_off;
1169 	char addrbuf[INET_ADDRSTRLEN];
1170 
1171 	LLE_WLOCK_ASSERT(la);
1172 
1173 	/* the following is not an error when doing bridging */
1174 	if (!bridged && la->lle_tbl->llt_ifp != ifp) {
1175 		if (log_arp_wrong_iface)
1176 			ARP_LOG(LOG_WARNING, "%s is on %s "
1177 			    "but got reply from %*D on %s\n",
1178 			    inet_ntoa_r(isaddr, addrbuf),
1179 			    la->lle_tbl->llt_ifp->if_xname,
1180 			    ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
1181 			    ifp->if_xname);
1182 		LLE_WUNLOCK(la);
1183 		return;
1184 	}
1185 	if ((la->la_flags & LLE_VALID) &&
1186 	    bcmp(ar_sha(ah), la->ll_addr, ifp->if_addrlen)) {
1187 		if (la->la_flags & LLE_STATIC) {
1188 			LLE_WUNLOCK(la);
1189 			if (log_arp_permanent_modify)
1190 				ARP_LOG(LOG_ERR,
1191 				    "%*D attempts to modify "
1192 				    "permanent entry for %s on %s\n",
1193 				    ifp->if_addrlen,
1194 				    (u_char *)ar_sha(ah), ":",
1195 				    inet_ntoa_r(isaddr, addrbuf),
1196 				    ifp->if_xname);
1197 			return;
1198 		}
1199 		if (log_arp_movements) {
1200 			ARP_LOG(LOG_INFO, "%s moved from %*D "
1201 			    "to %*D on %s\n",
1202 			    inet_ntoa_r(isaddr, addrbuf),
1203 			    ifp->if_addrlen,
1204 			    (u_char *)la->ll_addr, ":",
1205 			    ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
1206 			    ifp->if_xname);
1207 		}
1208 	}
1209 
1210 	/* Calculate full link prepend to use in lle */
1211 	linkhdrsize = sizeof(linkhdr);
1212 	if (lltable_calc_llheader(ifp, AF_INET, ar_sha(ah), linkhdr,
1213 	    &linkhdrsize, &lladdr_off) != 0) {
1214 		LLE_WUNLOCK(la);
1215 		return;
1216 	}
1217 
1218 	/* Check if something has changed */
1219 	if (memcmp(la->r_linkdata, linkhdr, linkhdrsize) != 0 ||
1220 	    (la->la_flags & LLE_VALID) == 0) {
1221 		/* Try to perform LLE update */
1222 		if (lltable_try_set_entry_addr(ifp, la, linkhdr, linkhdrsize,
1223 		    lladdr_off) == 0) {
1224 			LLE_WUNLOCK(la);
1225 			return;
1226 		}
1227 
1228 		/* Clear fast path feedback request if set */
1229 		llentry_mark_used(la);
1230 	}
1231 
1232 	arp_mark_lle_reachable(la, ifp);
1233 
1234 	/*
1235 	 * The packets are all freed within the call to the output
1236 	 * routine.
1237 	 *
1238 	 * NB: The lock MUST be released before the call to the
1239 	 * output routine.
1240 	 */
1241 	if (la->la_hold != NULL) {
1242 		struct mbuf *chain;
1243 
1244 		chain = arp_grab_holdchain(la);
1245 		LLE_WUNLOCK(la);
1246 		arp_flush_holdchain(ifp, la, chain);
1247 	} else
1248 		LLE_WUNLOCK(la);
1249 }
1250 
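/*
 * Move @la into the REACHABLE state: notify lle_event subscribers, honor
 * IFF_STICKYARP, and, for non-static entries, set la_expire and arm the
 * timer so that arptimer() runs shortly before expiry, leaving room for
 * the VERIFY-state refresh probes.
 */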
1251 static void
1252 arp_mark_lle_reachable(struct llentry *la, struct ifnet *ifp)
1253 {
1254 	int canceled, wtime;
1255 
1256 	LLE_WLOCK_ASSERT(la);
1257 
1258 	la->ln_state = ARP_LLINFO_REACHABLE;
1259 	EVENTHANDLER_INVOKE(lle_event, la, LLENTRY_RESOLVED);
1260 
1261 	if ((ifp->if_flags & IFF_STICKYARP) != 0)
1262 		la->la_flags |= LLE_STATIC;
1263 
1264 	if (!(la->la_flags & LLE_STATIC)) {
1265 		LLE_ADDREF(la);
1266 		la->la_expire = time_uptime + V_arpt_keep;
1267 		wtime = V_arpt_keep - V_arp_maxtries * V_arpt_rexmit;
1268 		if (wtime < 0)
1269 			wtime = V_arpt_keep;
1270 		canceled = callout_reset(&la->lle_timer,
1271 		    hz * wtime, arptimer, la);
1272 		if (canceled)
1273 			LLE_REMREF(la);
1274 	}
1275 	la->la_asked = 0;
1276 	la->la_preempt = V_arp_maxtries;
1277 }
1278 
1279 /*
1280  * Add permanent link-layer record for given interface address.
1281  */
1282 static __noinline void
1283 arp_add_ifa_lle(struct ifnet *ifp, const struct sockaddr *dst)
1284 {
1285 	struct llentry *lle, *lle_tmp;
1286 
1287 	/*
1288 	 * An interface address LLE record is considered static
1289 	 * because kernel code relies on the LLE_STATIC flag to check
1290 	 * whether these entries can be rewritten by ARP updates.
1291 	 */
1292 	lle = lltable_alloc_entry(LLTABLE(ifp), LLE_IFADDR | LLE_STATIC, dst);
1293 	if (lle == NULL) {
1294 		log(LOG_INFO, "arp_ifinit: cannot create arp "
1295 		    "entry for interface address\n");
1296 		return;
1297 	}
1298 
1299 	IF_AFDATA_WLOCK(ifp);
1300 	LLE_WLOCK(lle);
1301 	/* Unlink any existing entry */
1302 	lle_tmp = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
1303 	if (lle_tmp != NULL)
1304 		lltable_unlink_entry(LLTABLE(ifp), lle_tmp);
1305 
1306 	lltable_link_entry(LLTABLE(ifp), lle);
1307 	IF_AFDATA_WUNLOCK(ifp);
1308 
1309 	if (lle_tmp != NULL)
1310 		EVENTHANDLER_INVOKE(lle_event, lle_tmp, LLENTRY_EXPIRED);
1311 
1312 	EVENTHANDLER_INVOKE(lle_event, lle, LLENTRY_RESOLVED);
1313 	LLE_WUNLOCK(lle);
1314 	if (lle_tmp != NULL)
1315 		lltable_free_entry(LLTABLE(ifp), lle_tmp);
1316 }
1317 
1318 /*
1319  * Handle the garp_rexmit_count. Like sysctl_handle_int(), but limits the range
1320  * of valid values.
1321  */
1322 static int
1323 sysctl_garp_rexmit(SYSCTL_HANDLER_ARGS)
1324 {
1325 	int error;
1326 	int rexmit_count = *(int *)arg1;
1327 
1328 	error = sysctl_handle_int(oidp, &rexmit_count, 0, req);
1329 
1330 	/* Enforce limits on any new value that may have been set. */
1331 	if (!error && req->newptr) {
1332 		/* A new value was set. */
1333 		if (rexmit_count < 0) {
1334 			rexmit_count = 0;
1335 		} else if (rexmit_count > MAX_GARP_RETRANSMITS) {
1336 			rexmit_count = MAX_GARP_RETRANSMITS;
1337 		}
1338 		*(int *)arg1 = rexmit_count;
1339 	}
1340 
1341 	return (error);
1342 }
1343 
1344 /*
1345  * Retransmit a Gratuitous ARP (GARP) and, if necessary, schedule a callout to
1346  * retransmit it again. A pending callout owns a reference to the ifa.
1347  */
1348 static void
1349 garp_rexmit(void *arg)
1350 {
1351 	struct in_ifaddr *ia = arg;
1352 
1353 	if (callout_pending(&ia->ia_garp_timer) ||
1354 	    !callout_active(&ia->ia_garp_timer)) {
1355 		IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
1356 		ifa_free(&ia->ia_ifa);
1357 		return;
1358 	}
1359 
1360 	CURVNET_SET(ia->ia_ifa.ifa_ifp->if_vnet);
1361 
1362 	/*
1363 	 * Drop lock while the ARP request is generated.
1364 	 */
1365 	IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
1366 
1367 	arprequest(ia->ia_ifa.ifa_ifp, &IA_SIN(ia)->sin_addr,
1368 	    &IA_SIN(ia)->sin_addr, IF_LLADDR(ia->ia_ifa.ifa_ifp));
1369 
1370 	/*
1371 	 * Increment the count of retransmissions. If the count has reached the
1372 	 * maximum value, stop sending the GARP packets. Otherwise, schedule
1373 	 * the callout to retransmit another GARP packet.
1374 	 */
1375 	++ia->ia_garp_count;
1376 	if (ia->ia_garp_count >= garp_rexmit_count) {
1377 		ifa_free(&ia->ia_ifa);
1378 	} else {
1379 		int rescheduled;
1380 		IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
1381 		rescheduled = callout_reset(&ia->ia_garp_timer,
1382 		    (1 << ia->ia_garp_count) * hz,
1383 		    garp_rexmit, ia);
1384 		IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
1385 		if (rescheduled) {
1386 			ifa_free(&ia->ia_ifa);
1387 		}
1388 	}
1389 
1390 	CURVNET_RESTORE();
1391 }
1392 
1393 /*
1394  * Start the GARP retransmit timer.
1395  *
1396  * A single GARP is always transmitted when an IPv4 address is added
1397  * to an interface and that is usually sufficient. However, in some
1398  * circumstances, such as when a shared address is passed between
1399  * cluster nodes, this single GARP may occasionally be dropped or
1400  * lost. This can lead to neighbors on the network link working with a
1401  * stale ARP cache and sending packets destined for that address to
1402  * the node that previously owned the address, which may not respond.
1403  *
1404  * To avoid this situation, GARP retransmits can be enabled by setting
1405  * the net.link.ether.inet.garp_rexmit_count sysctl to a value greater
1406  * than zero. The setting represents the maximum number of
1407  * retransmissions. The interval between retransmissions is calculated
1408  * using an exponential backoff algorithm, doubling each time, so the
1409  * retransmission intervals are: {1, 2, 4, 8, 16, ...} (seconds).
1410  */
1411 static void
1412 garp_timer_start(struct ifaddr *ifa)
1413 {
1414 	struct in_ifaddr *ia = (struct in_ifaddr *) ifa;
1415 
1416 	IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
1417 	ia->ia_garp_count = 0;
1418 	if (callout_reset(&ia->ia_garp_timer, (1 << ia->ia_garp_count) * hz,
1419 	    garp_rexmit, ia) == 0) {
1420 		ifa_ref(ifa);
1421 	}
1422 	IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
1423 }
1424 
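/*
 * Called when an IPv4 address is configured on @ifp: announce the new
 * address with a gratuitous ARP, start the optional GARP retransmit
 * timer, and install the permanent LLE_IFADDR record for the address.
 */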
1425 void
1426 arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
1427 {
1428 	struct epoch_tracker et;
1429 	const struct sockaddr_in *dst_in;
1430 	const struct sockaddr *dst;
1431 
1432 	if (ifa->ifa_carp != NULL)
1433 		return;
1434 
1435 	dst = ifa->ifa_addr;
1436 	dst_in = (const struct sockaddr_in *)dst;
1437 
1438 	if (ntohl(dst_in->sin_addr.s_addr) == INADDR_ANY)
1439 		return;
1440 	NET_EPOCH_ENTER(et);
1441 	arp_announce_ifaddr(ifp, dst_in->sin_addr, IF_LLADDR(ifp));
1442 	NET_EPOCH_EXIT(et);
1443 	if (garp_rexmit_count > 0) {
1444 		garp_timer_start(ifa);
1445 	}
1446 
1447 	arp_add_ifa_lle(ifp, dst);
1448 }
1449 
1450 void
1451 arp_announce_ifaddr(struct ifnet *ifp, struct in_addr addr, u_char *enaddr)
1452 {
1453 
1454 	if (ntohl(addr.s_addr) != INADDR_ANY)
1455 		arprequest(ifp, &addr, &addr, enaddr);
1456 }
1457 
1458 /*
1459  * Sends gratuitous ARPs for each ifaddr to notify other
1460  * nodes about the address change.
1461  */
1462 static __noinline void
1463 arp_handle_ifllchange(struct ifnet *ifp)
1464 {
1465 	struct ifaddr *ifa;
1466 
1467 	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1468 		if (ifa->ifa_addr->sa_family == AF_INET)
1469 			arp_ifinit(ifp, ifa);
1470 	}
1471 }
1472 
1473 /*
1474  * A handler for interface link layer address change event.
1475  */
1476 static void
1477 arp_iflladdr(void *arg __unused, struct ifnet *ifp)
1478 {
1479 	/* if_bridge can update its lladdr during if_vmove(), after we've done
1480 	 * if_detach_internal()/dom_ifdetach(). */
1481 	if (ifp->if_afdata[AF_INET] == NULL)
1482 		return;
1483 
1484 	lltable_update_ifaddr(LLTABLE(ifp));
1485 
1486 	if ((ifp->if_flags & IFF_UP) != 0)
1487 		arp_handle_ifllchange(ifp);
1488 }
1489 
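/*
 * Per-VNET initialization: the default VNET registers the ARP netisr
 * handler and the iflladdr_event hook; under VIMAGE, other VNETs only
 * register their own netisr instance.
 */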
1490 static void
1491 vnet_arp_init(void)
1492 {
1493 
1494 	if (IS_DEFAULT_VNET(curvnet)) {
1495 		netisr_register(&arp_nh);
1496 		iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
1497 		    arp_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
1498 	}
1499 #ifdef VIMAGE
1500 	else
1501 		netisr_register_vnet(&arp_nh);
1502 #endif
1503 }
1504 VNET_SYSINIT(vnet_arp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND,
1505     vnet_arp_init, 0);
1506 
1507 #ifdef VIMAGE
1508 /*
1509  * We have to unregister ARP along with IP otherwise we risk doing INADDR_HASH
1510  * lookups after destroying the hash.  Ideally this would go on SI_ORDER_3.5.
1511  */
1512 static void
1513 vnet_arp_destroy(__unused void *arg)
1514 {
1515 
1516 	netisr_unregister_vnet(&arp_nh);
1517 }
1518 VNET_SYSUNINIT(vnet_arp_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD,
1519     vnet_arp_destroy, NULL);
1520 #endif
1521