/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Ethernet address resolution protocol.
 * TODO:
 *	add "inuse/lock" bit (or ref. count) along with valid bit
 */
#include <sys/cdefs.h>
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/if_bridgevar.h>
#include <net/netisr.h>
#include <net/ethernet.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_fib.h>
#include <netinet/in_var.h>
#include <net/if_llatbl.h>
#include <netinet/if_ether.h>
#ifdef INET
#include <netinet/ip_carp.h>
#endif

#include <security/mac/mac_framework.h>

#define	SIN(s)	((const struct sockaddr_in *)(s))

static struct timeval arp_lastlog;
static int arp_curpps;
static int arp_maxpps = 1;

/* Simple ARP state machine */
enum arp_llinfo_state {
	ARP_LLINFO_INCOMPLETE = 0, /* No LLE data */
	ARP_LLINFO_REACHABLE,	/* LLE is valid */
	ARP_LLINFO_VERIFY,	/* LLE is valid, need refresh */
	ARP_LLINFO_DELETED,	/* LLE is deleted */
};

SYSCTL_DECL(_net_link_ether);
static SYSCTL_NODE(_net_link_ether, PF_INET, inet,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");
static SYSCTL_NODE(_net_link_ether, PF_ARP, arp,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");

/* timer values */
VNET_DEFINE_STATIC(int, arpt_keep) = (20*60);	/* once resolved, good for 20
						 * minutes */
VNET_DEFINE_STATIC(int, arp_maxtries) = 5;
VNET_DEFINE_STATIC(int, arp_proxyall) = 0;
VNET_DEFINE_STATIC(int, arpt_down) = 20;	/* keep incomplete entries for
						 * 20 seconds */
VNET_DEFINE_STATIC(int, arpt_rexmit) = 1;	/* retransmit arp entries, sec*/
VNET_PCPUSTAT_DEFINE(struct arpstat, arpstat);  /* ARP statistics, see if_arp.h */
VNET_PCPUSTAT_SYSINIT(arpstat);

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(arpstat);
#endif /* VIMAGE */

VNET_DEFINE_STATIC(int, arp_maxhold) = 16;

#define	V_arpt_keep		VNET(arpt_keep)
#define	V_arpt_down		VNET(arpt_down)
#define	V_arpt_rexmit		VNET(arpt_rexmit)
#define	V_arp_maxtries		VNET(arp_maxtries)
#define	V_arp_proxyall		VNET(arp_proxyall)
#define	V_arp_maxhold		VNET(arp_maxhold)

SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(arpt_keep), 0,
	"ARP entry lifetime in seconds");
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(arp_maxtries), 0,
	"ARP resolution attempts before returning error");
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(arp_proxyall), 0,
	"Enable proxy ARP for all suitable requests");
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, wait, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(arpt_down), 0,
	"Incomplete ARP entry lifetime in seconds");
SYSCTL_VNET_PCPUSTAT(_net_link_ether_arp, OID_AUTO, stats, struct arpstat,
	arpstat, "ARP statistics (struct arpstat, net/if_arp.h)");
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxhold, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(arp_maxhold), 0,
	"Number of packets to hold per ARP entry");
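
/*
 * The tunables declared above and below are exported under the
 * net.link.ether.inet and net.link.ether.arp sysctl trees (see the
 * SYSCTL_NODE declarations earlier), e.g. net.link.ether.inet.max_age
 * and net.link.ether.inet.maxtries.
 */
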
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_log_per_second,
	CTLFLAG_RW, &arp_maxpps, 0,
	"Maximum number of remotely triggered ARP messages that can be "
	"logged per second");

/*
 * Due to the exponential backoff algorithm used for the interval between GARP
 * retransmissions, the maximum number of retransmissions is limited for
 * sanity. This limit corresponds to a maximum interval between retransmissions
 * of 2^16 seconds ~= 18 hours.
 *
 * Making this limit more dynamic is more complicated than worthwhile,
 * especially since sending out GARPs spaced days apart would be of little
 * use. A maximum dynamic limit would look something like:
 *
 *   const int max = fls(INT_MAX / hz) - 1;
 */
#define MAX_GARP_RETRANSMITS 16
static int sysctl_garp_rexmit(SYSCTL_HANDLER_ARGS);
VNET_DEFINE_STATIC(int, garp_rexmit_count) = 0; /* GARP retransmission setting. */
#define	V_garp_rexmit_count	VNET(garp_rexmit_count)

SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, garp_rexmit_count,
    CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE|CTLFLAG_VNET,
    &VNET_NAME(garp_rexmit_count), 0, sysctl_garp_rexmit, "I",
    "Number of times to retransmit GARP packets;"
    " 0 to disable, maximum of 16");

VNET_DEFINE_STATIC(int, arp_log_level) = LOG_INFO;	/* Min. log(9) level. */
#define	V_arp_log_level		VNET(arp_log_level)
SYSCTL_INT(_net_link_ether_arp, OID_AUTO, log_level, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(arp_log_level), 0,
	"Minimum log(9) level for recording rate limited arp log messages. "
	"A higher level logs more (emerg=0, info=6 (default), debug=7).");
#define	ARP_LOG(pri, ...)	do {					\
	if ((pri) <= V_arp_log_level &&					\
	    ppsratecheck(&arp_lastlog, &arp_curpps, arp_maxpps))	\
		log((pri), "arp: " __VA_ARGS__);			\
} while (0)
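
/*
 * Example use, as in arpintr() below:
 *
 *	ARP_LOG(LOG_NOTICE, "short packet received on %s\n", if_name(ifp));
 *
 * The message is only emitted if LOG_NOTICE passes the
 * net.link.ether.arp.log_level filter and the rate limit set by
 * net.link.ether.inet.max_log_per_second has not been exceeded.
 */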

static void	arpintr(struct mbuf *);
static void	arptimer(void *);
#ifdef INET
static void	in_arpinput(struct mbuf *);
#endif

static void arp_check_update_lle(struct arphdr *ah, struct in_addr isaddr,
    struct ifnet *ifp, int bridged, struct llentry *la);
static void arp_mark_lle_reachable(struct llentry *la, struct ifnet *ifp);
static void arp_iflladdr(void *arg __unused, struct ifnet *ifp);

static eventhandler_tag iflladdr_tag;

static const struct netisr_handler arp_nh = {
	.nh_name = "arp",
	.nh_handler = arpintr,
	.nh_proto = NETISR_ARP,
	.nh_policy = NETISR_POLICY_SOURCE,
};

/*
 * Timeout routine.  Age arp_tab entries periodically.
 */
static void
arptimer(void *arg)
{
	struct llentry *lle = (struct llentry *)arg;
	struct ifnet *ifp;

	if (lle->la_flags & LLE_STATIC) {
		return;
	}
	LLE_WLOCK(lle);
	if (callout_pending(&lle->lle_timer)) {
		/*
		 * The treatment of active/pending here is a bit odd.  If the
		 * pending bit is set, the callout was rescheduled before this
		 * invocation ran; the callout_reset() that did so returned 1
		 * and our reference was already dropped by arpresolve()
		 * below, so just bail out.  The active bit is ignored: if the
		 * callout was stopped in ll_tablefree() while it was running,
		 * the stop returned 0 and the entry was not deleted there
		 * because the callout could not be stopped, so the delete
		 * must go through here now.
		 */
		LLE_WUNLOCK(lle);
		return;
	}
	ifp = lle->lle_tbl->llt_ifp;
	CURVNET_SET(ifp->if_vnet);

	switch (lle->ln_state) {
	case ARP_LLINFO_REACHABLE:

		/*
		 * Expiration time is approaching.
		 * Request usage feedback from the datapath.
		 * Change state and re-schedule ourselves.
		 */
		llentry_request_feedback(lle);
		lle->ln_state = ARP_LLINFO_VERIFY;
		callout_schedule(&lle->lle_timer, hz * V_arpt_rexmit);
		LLE_WUNLOCK(lle);
		CURVNET_RESTORE();
		return;
	case ARP_LLINFO_VERIFY:
		if (llentry_get_hittime(lle) > 0 && lle->la_preempt > 0) {
			/* Entry was used, issue refresh request */
			struct epoch_tracker et;
			struct in_addr dst;

			dst = lle->r_l3addr.addr4;
			lle->la_preempt--;
			callout_schedule(&lle->lle_timer, hz * V_arpt_rexmit);
			LLE_WUNLOCK(lle);
			NET_EPOCH_ENTER(et);
			arprequest(ifp, NULL, &dst, NULL);
			NET_EPOCH_EXIT(et);
			CURVNET_RESTORE();
			return;
		}
		/* Nothing happened. Reschedule if not too late */
		if (lle->la_expire > time_uptime) {
			callout_schedule(&lle->lle_timer, hz * V_arpt_rexmit);
			LLE_WUNLOCK(lle);
			CURVNET_RESTORE();
			return;
		}
		break;
	case ARP_LLINFO_INCOMPLETE:
	case ARP_LLINFO_DELETED:
		break;
	}

	if ((lle->la_flags & LLE_DELETED) == 0) {
		int evt;

		if (lle->la_flags & LLE_VALID)
			evt = LLENTRY_EXPIRED;
		else
			evt = LLENTRY_TIMEDOUT;
		EVENTHANDLER_INVOKE(lle_event, lle, evt);
	}

	callout_stop(&lle->lle_timer);

	/* XXX: LOR avoidance. We still have ref on lle. */
	LLE_WUNLOCK(lle);
	IF_AFDATA_LOCK(ifp);
	LLE_WLOCK(lle);

	/* Guard against race with other llentry_free(). */
	if (lle->la_flags & LLE_LINKED) {
		LLE_REMREF(lle);
		lltable_unlink_entry(lle->lle_tbl, lle);
	}
	IF_AFDATA_UNLOCK(ifp);

	size_t pkts_dropped = llentry_free(lle);

	ARPSTAT_ADD(dropped, pkts_dropped);
	ARPSTAT_INC(timeouts);

	CURVNET_RESTORE();
}

/*
 * Stores link-layer header for @ifp in format suitable for if_output()
 * into buffer @buf. Resulting header length is stored in @bufsize.
 *
 * Returns 0 on success.
 */
static int
arp_fillheader(struct ifnet *ifp, struct arphdr *ah, int bcast, u_char *buf,
    size_t *bufsize)
{
	struct if_encap_req ereq;
	int error;

	bzero(buf, *bufsize);
	bzero(&ereq, sizeof(ereq));
	ereq.buf = buf;
	ereq.bufsize = *bufsize;
	ereq.rtype = IFENCAP_LL;
	ereq.family = AF_ARP;
	ereq.lladdr = ar_tha(ah);
	ereq.hdata = (u_char *)ah;
	if (bcast)
		ereq.flags = IFENCAP_FLAG_BROADCAST;
	error = ifp->if_requestencap(ifp, &ereq);
	if (error == 0)
		*bufsize = ereq.bufsize;

	return (error);
}

/*
 * Broadcast an ARP request. Caller specifies:
 *	- arp header source ip address
 *	- arp header target ip address
 *	- arp header source ethernet address
 */
static int
arprequest_internal(struct ifnet *ifp, const struct in_addr *sip,
    const struct in_addr *tip, u_char *enaddr)
{
	struct mbuf *m;
	struct arphdr *ah;
	struct sockaddr sa;
	u_char *carpaddr = NULL;
	uint8_t linkhdr[LLE_MAX_LINKHDR];
	size_t linkhdrsize;
	struct route ro;
	int error;

	NET_EPOCH_ASSERT();

	if (sip == NULL) {
		/*
		 * The caller did not supply a source address, try to find
		 * a compatible one among those assigned to this interface.
		 */
		struct ifaddr *ifa;

		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;

			if (ifa->ifa_carp) {
				if ((*carp_iamatch_p)(ifa, &carpaddr) == 0)
					continue;
				sip = &IA_SIN(ifa)->sin_addr;
			} else {
				carpaddr = NULL;
				sip = &IA_SIN(ifa)->sin_addr;
			}

			if (0 == ((sip->s_addr ^ tip->s_addr) &
			    IA_MASKSIN(ifa)->sin_addr.s_addr))
				break;  /* found it. */
		}
		if (sip == NULL) {
			printf("%s: cannot find matching address\n", __func__);
			return (EADDRNOTAVAIL);
		}
	}
	if (enaddr == NULL)
		enaddr = carpaddr ? carpaddr : (u_char *)IF_LLADDR(ifp);

	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
		return (ENOMEM);
	m->m_len = sizeof(*ah) + 2 * sizeof(struct in_addr) +
	    2 * ifp->if_addrlen;
	m->m_pkthdr.len = m->m_len;
	M_ALIGN(m, m->m_len);
	ah = mtod(m, struct arphdr *);
	bzero((caddr_t)ah, m->m_len);
#ifdef MAC
	mac_netinet_arp_send(ifp, m);
#endif
	ah->ar_pro = htons(ETHERTYPE_IP);
	ah->ar_hln = ifp->if_addrlen;		/* hardware address length */
	ah->ar_pln = sizeof(struct in_addr);	/* protocol address length */
	ah->ar_op = htons(ARPOP_REQUEST);
	bcopy(enaddr, ar_sha(ah), ah->ar_hln);
	bcopy(sip, ar_spa(ah), ah->ar_pln);
	bcopy(tip, ar_tpa(ah), ah->ar_pln);
	sa.sa_family = AF_ARP;
	sa.sa_len = 2;

	/* Calculate link header for sending frame */
	bzero(&ro, sizeof(ro));
	linkhdrsize = sizeof(linkhdr);
	error = arp_fillheader(ifp, ah, 1, linkhdr, &linkhdrsize);
	if (error != 0 && error != EAFNOSUPPORT) {
		m_freem(m);
		ARP_LOG(LOG_ERR, "Failed to calculate ARP header on %s: %d\n",
		    if_name(ifp), error);
		return (error);
	}

	ro.ro_prepend = linkhdr;
	ro.ro_plen = linkhdrsize;
	ro.ro_flags = 0;

	m->m_flags |= M_BCAST;
	m_clrprotoflags(m);	/* Avoid confusing lower layers. */
	error = (*ifp->if_output)(ifp, m, &sa, &ro);
	ARPSTAT_INC(txrequests);
	if (error) {
		ARPSTAT_INC(txerrors);
		ARP_LOG(LOG_DEBUG, "Failed to send ARP packet on %s: %d\n",
		    if_name(ifp), error);
	}
	return (error);
}

void
arprequest(struct ifnet *ifp, const struct in_addr *sip,
    const struct in_addr *tip, u_char *enaddr)
{

	(void) arprequest_internal(ifp, sip, tip, enaddr);
}
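
/*
 * Note that arprequest() must be called from within a network epoch
 * section: arprequest_internal() asserts this with NET_EPOCH_ASSERT().
 * Callers that are not already inside the epoch enter it explicitly,
 * as arptimer() and garp_rexmit() do with NET_EPOCH_ENTER()/
 * NET_EPOCH_EXIT().
 */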

/*
 * Resolve an IP address into an ethernet address - heavy version.
 * Used internally by arpresolve().
 * We have already checked that we can't use an existing lle without
 * modification so we have to acquire an LLE_EXCLUSIVE lle lock.
 *
 * On success, desten and pflags are filled in and the function returns 0.
 * If the packet must be held pending resolution, we return EWOULDBLOCK.
 * On other errors, we return the corresponding error code.
 * Note that m_freem() handles NULL.
 */
static int
arpresolve_full(struct ifnet *ifp, int is_gw, int flags, struct mbuf *m,
    const struct sockaddr *dst, u_char *desten, uint32_t *pflags,
    struct llentry **plle)
{
	struct llentry *la = NULL, *la_tmp;
	int error, renew;
	char *lladdr;
	int ll_len;

	NET_EPOCH_ASSERT();

	if (pflags != NULL)
		*pflags = 0;
	if (plle != NULL)
		*plle = NULL;

	if ((flags & LLE_CREATE) == 0)
		la = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
	if (la == NULL && (ifp->if_flags & (IFF_NOARP | IFF_STATICARP)) == 0) {
		la = lltable_alloc_entry(LLTABLE(ifp), 0, dst);
		if (la == NULL) {
			char addrbuf[INET_ADDRSTRLEN];

			log(LOG_DEBUG,
			    "arpresolve: can't allocate llinfo for %s on %s\n",
			    inet_ntoa_r(SIN(dst)->sin_addr, addrbuf),
			    if_name(ifp));
			m_freem(m);
			return (EINVAL);
		}

		IF_AFDATA_WLOCK(ifp);
		LLE_WLOCK(la);
		la_tmp = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
		/* Prefer ANY existing lle over newly-created one */
		if (la_tmp == NULL)
			lltable_link_entry(LLTABLE(ifp), la);
		IF_AFDATA_WUNLOCK(ifp);
		if (la_tmp != NULL) {
			lltable_free_entry(LLTABLE(ifp), la);
			la = la_tmp;
		}
	}
	if (la == NULL) {
		m_freem(m);
		return (EINVAL);
	}

	if ((la->la_flags & LLE_VALID) &&
	    ((la->la_flags & LLE_STATIC) || la->la_expire > time_uptime)) {
		if (flags & LLE_ADDRONLY) {
			lladdr = la->ll_addr;
			ll_len = ifp->if_addrlen;
		} else {
			lladdr = la->r_linkdata;
			ll_len = la->r_hdrlen;
		}
		bcopy(lladdr, desten, ll_len);

		/* Notify LLE code that the entry was used by datapath */
		llentry_provide_feedback(la);
		if (pflags != NULL)
			*pflags = la->la_flags & (LLE_VALID|LLE_IFADDR);
		if (plle) {
			LLE_ADDREF(la);
			*plle = la;
		}
		LLE_WUNLOCK(la);
		return (0);
	}

	renew = (la->la_asked == 0 || la->la_expire != time_uptime);

	/*
	 * There is an arptab entry, but no ethernet address
	 * response yet. Add the mbuf to the list, dropping
	 * the oldest packet if we have exceeded the system
	 * setting.
	 */
	if (m != NULL) {
		size_t dropped = lltable_append_entry_queue(la, m, V_arp_maxhold);
		ARPSTAT_ADD(dropped, dropped);
	}

	/*
	 * Return EWOULDBLOCK if we have tried less than arp_maxtries. It
	 * will be masked by ether_output(). Return EHOSTDOWN/EHOSTUNREACH
	 * if we have already sent arp_maxtries ARP requests. Retransmit the
	 * ARP request, but not faster than one request per second.
	 */
	if (la->la_asked < V_arp_maxtries)
		error = EWOULDBLOCK;	/* First request. */
	else
		error = is_gw != 0 ? EHOSTUNREACH : EHOSTDOWN;

	if (renew) {
		int canceled, e;

		LLE_ADDREF(la);
		la->la_expire = time_uptime;
		canceled = callout_reset(&la->lle_timer, hz * V_arpt_down,
		    arptimer, la);
		if (canceled)
			LLE_REMREF(la);
		la->la_asked++;
		LLE_WUNLOCK(la);
		e = arprequest_internal(ifp, NULL, &SIN(dst)->sin_addr, NULL);
		/*
		 * Only overwrite 'error' in case of error; in case of success
		 * the proper return value was already set above.
		 */
		if (e != 0)
			return (e);
		return (error);
	}

	LLE_WUNLOCK(la);
	return (error);
}

/*
 * Looks up the link-layer header based on an IP address.
 * On input:
 *    ifp is the interface we use
 *    is_gw != 0 if @dst represents gateway to some destination
 *    m is the mbuf. May be NULL if we don't have a packet.
 *    dst is the next hop.
 *    desten is the storage to put LL header.
 *    flags returns subset of lle flags: LLE_VALID | LLE_IFADDR
 *
 * On success, full/partial link header and flags are filled in and
 * the function returns 0.
 * If the packet must be held pending resolution, we return EWOULDBLOCK.
 * On other errors, we return the corresponding error code.
 * Note that m_freem() handles NULL.
 */
int
arpresolve(struct ifnet *ifp, int is_gw, struct mbuf *m,
    const struct sockaddr *dst, u_char *desten, uint32_t *pflags,
    struct llentry **plle)
{
	struct llentry *la = NULL;

	NET_EPOCH_ASSERT();

	if (pflags != NULL)
		*pflags = 0;
	if (plle != NULL)
		*plle = NULL;

	if (m != NULL) {
		if (m->m_flags & M_BCAST) {
			/* broadcast */
			(void)memcpy(desten,
			    ifp->if_broadcastaddr, ifp->if_addrlen);
			return (0);
		}
		if (m->m_flags & M_MCAST) {
			/* multicast */
			ETHER_MAP_IP_MULTICAST(&SIN(dst)->sin_addr, desten);
			return (0);
		}
	}

	la = lla_lookup(LLTABLE(ifp), plle ? LLE_EXCLUSIVE : LLE_UNLOCKED, dst);
	if (la != NULL && (la->r_flags & RLLE_VALID) != 0) {
		/* Entry found, let's copy lle info */
		bcopy(la->r_linkdata, desten, la->r_hdrlen);
		if (pflags != NULL)
			*pflags = LLE_VALID | (la->r_flags & RLLE_IFADDR);
		/* Notify the LLE handling code that the entry was used. */
		llentry_provide_feedback(la);
		if (plle) {
			LLE_ADDREF(la);
			*plle = la;
			LLE_WUNLOCK(la);
		}
		return (0);
	}
	if (plle && la)
		LLE_WUNLOCK(la);

	return (arpresolve_full(ifp, is_gw, la == NULL ? LLE_CREATE : 0, m, dst,
	    desten, pflags, plle));
}
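/*
 * Illustrative calling pattern for arpresolve() (a sketch only; the
 * real consumers live in the ethernet output path, not in this file):
 *
 *	uint32_t pflags;
 *	u_char linkhdr[LLE_MAX_LINKHDR];
 *	int error;
 *
 *	error = arpresolve(ifp, is_gw, m, dst, linkhdr, &pflags, NULL);
 *	if (error == EWOULDBLOCK)
 *		error = 0;	(the mbuf was queued on the ARP entry
 *				 and will be sent once resolution completes)
 *
 * On success the (possibly partial) link-layer header is in linkhdr
 * and can be prepended to the outgoing frame.
 */
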
/*
 * Common length and type checks are done here,
 * then the protocol-specific routine is called.
 */
static void
arpintr(struct mbuf *m)
{
	struct arphdr *ar;
	struct ifnet *ifp;
	char *layer;
	int hlen;

	ifp = m->m_pkthdr.rcvif;

	if (m->m_len < sizeof(struct arphdr) &&
	    ((m = m_pullup(m, sizeof(struct arphdr))) == NULL)) {
		ARP_LOG(LOG_NOTICE, "packet with short header received on %s\n",
		    if_name(ifp));
		return;
	}
	ar = mtod(m, struct arphdr *);

	/* Check if length is sufficient */
	if (m->m_len < arphdr_len(ar)) {
		m = m_pullup(m, arphdr_len(ar));
		if (m == NULL) {
			ARP_LOG(LOG_NOTICE, "short packet received on %s\n",
			    if_name(ifp));
			return;
		}
		ar = mtod(m, struct arphdr *);
	}

	hlen = 0;
	layer = "";
	switch (ntohs(ar->ar_hrd)) {
	case ARPHRD_ETHER:
		hlen = ETHER_ADDR_LEN; /* RFC 826 */
		layer = "ethernet";
		break;
	case ARPHRD_IEEE802:
		hlen = ETHER_ADDR_LEN;
		layer = "ieee802";
		break;
	case ARPHRD_INFINIBAND:
		hlen = 20;	/* RFC 4391, INFINIBAND_ALEN */
		layer = "infiniband";
		break;
	case ARPHRD_IEEE1394:
		hlen = 0; /* SHALL be 16 */ /* RFC 2734 */
		layer = "firewire";

		/*
		 * Restrict too long hardware addresses.
		 * Currently we are capable of handling 20-byte
		 * addresses ( sizeof(lle->ll_addr) )
		 */
		if (ar->ar_hln >= 20)
			hlen = 16;
		break;
	default:
		ARP_LOG(LOG_NOTICE,
		    "packet with unknown hardware format 0x%02d received on "
		    "%s\n", ntohs(ar->ar_hrd), if_name(ifp));
		m_freem(m);
		return;
	}

	if (hlen != 0 && hlen != ar->ar_hln) {
		ARP_LOG(LOG_NOTICE,
		    "packet with invalid %s address length %d received on %s\n",
		    layer, ar->ar_hln, if_name(ifp));
		m_freem(m);
		return;
	}

	ARPSTAT_INC(received);
	switch (ntohs(ar->ar_pro)) {
#ifdef INET
	case ETHERTYPE_IP:
		in_arpinput(m);
		return;
#endif
	}
	m_freem(m);
}

#ifdef INET
/*
 * ARP for Internet protocols on 10 Mb/s Ethernet.
 * Algorithm is that given in RFC 826.
 * In addition, a sanity check is performed on the sender
 * protocol address, to catch impersonators.
 * We no longer handle negotiations for use of trailer protocol:
 * Formerly, ARP replied for protocol type ETHERTYPE_TRAIL sent
 * along with IP replies if we wanted trailers sent to us,
 * and also sent them in response to IP replies.
 * This allowed either end to announce the desire to receive
 * trailer packets.
 * We no longer reply to requests for ETHERTYPE_TRAIL protocol either,
 * but formerly didn't normally send requests.
 */
static int log_arp_wrong_iface = 1;
static int log_arp_movements = 1;
static int log_arp_permanent_modify = 1;
static int allow_multicast = 0;

SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_wrong_iface, CTLFLAG_RW,
	&log_arp_wrong_iface, 0,
	"log arp packets arriving on the wrong interface");
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_movements, CTLFLAG_RW,
	&log_arp_movements, 0,
	"log arp replies from MACs different than the one in the cache");
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_permanent_modify, CTLFLAG_RW,
	&log_arp_permanent_modify, 0,
	"log arp replies from MACs different than the one in the permanent arp entry");
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, allow_multicast, CTLFLAG_RW,
	&allow_multicast, 0, "accept multicast addresses");

static void
in_arpinput(struct mbuf *m)
{
	struct arphdr *ah;
	struct ifnet *ifp = m->m_pkthdr.rcvif;
	struct llentry *la = NULL, *la_tmp;
	struct ifaddr *ifa;
	struct in_ifaddr *ia;
	struct sockaddr sa;
	struct in_addr isaddr, itaddr, myaddr;
	u_int8_t *enaddr = NULL;
	int op;
	int bridged = 0, is_bridge = 0;
	int carped;
	struct sockaddr_in sin;
	struct sockaddr *dst;
	struct nhop_object *nh;
	uint8_t linkhdr[LLE_MAX_LINKHDR];
	struct route ro;
	size_t linkhdrsize;
	int lladdr_off;
	int error;
	char addrbuf[INET_ADDRSTRLEN];

	NET_EPOCH_ASSERT();

	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = 0;

	if (ifp->if_bridge)
		bridged = 1;
	if (ifp->if_type == IFT_BRIDGE)
		is_bridge = 1;

	/*
	 * We have already checked that the mbuf contains enough contiguous
	 * data to hold the entire arp message, according to the arp header.
	 */
	ah = mtod(m, struct arphdr *);

	/*
	 * ARP is only for IPv4 so we can reject packets with
	 * a protocol length not equal to an IPv4 address.
	 */
	if (ah->ar_pln != sizeof(struct in_addr)) {
		ARP_LOG(LOG_NOTICE, "requested protocol length != %zu\n",
		    sizeof(struct in_addr));
		goto drop;
	}

	if (allow_multicast == 0 && ETHER_IS_MULTICAST(ar_sha(ah))) {
		ARP_LOG(LOG_NOTICE, "%*D is multicast\n",
		    ifp->if_addrlen, (u_char *)ar_sha(ah), ":");
		goto drop;
	}

	op = ntohs(ah->ar_op);
	(void)memcpy(&isaddr, ar_spa(ah), sizeof (isaddr));
	(void)memcpy(&itaddr, ar_tpa(ah), sizeof (itaddr));

	if (op == ARPOP_REPLY)
		ARPSTAT_INC(rxreplies);

	/*
	 * For a bridge, we want to check the address irrespective
	 * of the receive interface. (This will change slightly
	 * when we have clusters of interfaces).
	 */
	CK_LIST_FOREACH(ia, INADDR_HASH(itaddr.s_addr), ia_hash) {
		if (((bridged && bridge_same_p(ia->ia_ifp->if_bridge, ifp->if_bridge)) ||
		    ia->ia_ifp == ifp) &&
		    itaddr.s_addr == ia->ia_addr.sin_addr.s_addr &&
		    (ia->ia_ifa.ifa_carp == NULL ||
		    (*carp_iamatch_p)(&ia->ia_ifa, &enaddr))) {
			ifa_ref(&ia->ia_ifa);
			goto match;
		}
	}
	CK_LIST_FOREACH(ia, INADDR_HASH(isaddr.s_addr), ia_hash)
		if (((bridged && bridge_same_p(ia->ia_ifp->if_bridge, ifp->if_bridge)) ||
		    ia->ia_ifp == ifp) &&
		    isaddr.s_addr == ia->ia_addr.sin_addr.s_addr) {
			ifa_ref(&ia->ia_ifa);
			goto match;
		}

#define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia)				\
	(bridge_get_softc_p(ia->ia_ifp) == ifp->if_softc &&		\
	!bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) && \
	addr == ia->ia_addr.sin_addr.s_addr)
	/*
	 * Check the case when bridge shares its MAC address with
	 * some of its children, so packets are claimed by bridge
	 * itself (bridge_input() does it first), but they are really
	 * meant to be destined to the bridge member.
	 */
	if (is_bridge) {
		CK_LIST_FOREACH(ia, INADDR_HASH(itaddr.s_addr), ia_hash) {
			if (BDG_MEMBER_MATCHES_ARP(itaddr.s_addr, ifp, ia)) {
				ifa_ref(&ia->ia_ifa);
				ifp = ia->ia_ifp;
				goto match;
			}
		}
	}
#undef BDG_MEMBER_MATCHES_ARP

	/*
	 * No match, use the first inet address on the receive interface
	 * as a dummy address for the rest of the function.
	 */
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
		if (ifa->ifa_addr->sa_family == AF_INET &&
		    (ifa->ifa_carp == NULL ||
		    (*carp_iamatch_p)(ifa, &enaddr))) {
			ia = ifatoia(ifa);
			ifa_ref(ifa);
			goto match;
		}

	/*
	 * If bridging, fall back to using any inet address.
	 */
	if (!bridged || (ia = CK_STAILQ_FIRST(&V_in_ifaddrhead)) == NULL)
		goto drop;
	ifa_ref(&ia->ia_ifa);
match:
	if (!enaddr)
		enaddr = (u_int8_t *)IF_LLADDR(ifp);
	carped = (ia->ia_ifa.ifa_carp != NULL);
	myaddr = ia->ia_addr.sin_addr;
	ifa_free(&ia->ia_ifa);
	if (!bcmp(ar_sha(ah), enaddr, ifp->if_addrlen))
		goto drop;	/* it's from me, ignore it. */
	if (!bcmp(ar_sha(ah), ifp->if_broadcastaddr, ifp->if_addrlen)) {
		ARP_LOG(LOG_NOTICE, "link address is broadcast for IP address "
		    "%s!\n", inet_ntoa_r(isaddr, addrbuf));
		goto drop;
	}

	if (ifp->if_addrlen != ah->ar_hln) {
		ARP_LOG(LOG_WARNING, "from %*D: addr len: new %d, "
		    "i/f %d (ignored)\n", ifp->if_addrlen,
		    (u_char *) ar_sha(ah), ":", ah->ar_hln,
		    ifp->if_addrlen);
		goto drop;
	}

	/*
	 * Warn if another host is using the same IP address, but only if the
	 * IP address isn't 0.0.0.0, which is used for DHCP only, in which
	 * case we suppress the warning to avoid false positive complaints of
	 * potential misconfiguration.
	 */
	if (!bridged && !carped && isaddr.s_addr == myaddr.s_addr &&
	    myaddr.s_addr != 0) {
		ARP_LOG(LOG_ERR, "%*D is using my IP address %s on %s!\n",
		    ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
		    inet_ntoa_r(isaddr, addrbuf), ifp->if_xname);
		itaddr = myaddr;
		ARPSTAT_INC(dupips);
		goto reply;
	}
	if (ifp->if_flags & IFF_STATICARP)
		goto reply;

	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = isaddr;
	dst = (struct sockaddr *)&sin;
	la = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
	if (la != NULL)
		arp_check_update_lle(ah, isaddr, ifp, bridged, la);
	else if (itaddr.s_addr == myaddr.s_addr) {
		/*
		 * Request/reply to our address, but no lle exists yet.
		 * Calculate full link prepend to use in lle.
		 */
		linkhdrsize = sizeof(linkhdr);
		if (lltable_calc_llheader(ifp, AF_INET, ar_sha(ah), linkhdr,
		    &linkhdrsize, &lladdr_off) != 0)
			goto reply;

		/* Allocate new entry */
		la = lltable_alloc_entry(LLTABLE(ifp), 0, dst);
		if (la == NULL) {
			/*
			 * lle creation may fail if the source address belongs
			 * to a non-directly connected subnet. However, we
			 * will try to answer the request instead of dropping
			 * the frame.
			 */
			goto reply;
		}
		lltable_set_entry_addr(ifp, la, linkhdr, linkhdrsize,
		    lladdr_off);

		IF_AFDATA_WLOCK(ifp);
		LLE_WLOCK(la);
		la_tmp = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);

		/*
		 * Check if the lle still does not exist.
		 * If it does, that means that we either
		 * 1) have configured it explicitly, via
		 * 1a) 'arp -s' static entry or
		 * 1b) interface address static record
		 * or
		 * 2) it was the result of sending the first packet to the host
		 * or
		 * 3) it was another arp reply packet we handled in a
		 * different thread.
		 *
		 * In all cases except 3) we definitely need to prefer the
		 * existing lle. For the sake of simplicity, prefer any
		 * existing lle over the newly-created one.
		 */
		if (la_tmp == NULL)
			lltable_link_entry(LLTABLE(ifp), la);
		IF_AFDATA_WUNLOCK(ifp);

		if (la_tmp == NULL) {
			arp_mark_lle_reachable(la, ifp);
			LLE_WUNLOCK(la);
		} else {
			/* Free newly-created entry and handle packet */
			lltable_free_entry(LLTABLE(ifp), la);
			la = la_tmp;
			la_tmp = NULL;
			arp_check_update_lle(ah, isaddr, ifp, bridged, la);
			/* arp_check_update_lle() returns @la unlocked */
		}
		la = NULL;
	}
reply:
	if (op != ARPOP_REQUEST)
		goto drop;
	ARPSTAT_INC(rxrequests);

	if (itaddr.s_addr == myaddr.s_addr) {
		/* Shortcut.. the receiving interface is the target. */
		(void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
		(void)memcpy(ar_sha(ah), enaddr, ah->ar_hln);
	} else {
		/*
		 * Destination address is not ours. Check if a
		 * proxyarp entry exists or proxyarp is turned on globally.
		 */
		struct llentry *lle;

		sin.sin_addr = itaddr;
		lle = lla_lookup(LLTABLE(ifp), 0, (struct sockaddr *)&sin);

		if ((lle != NULL) && (lle->la_flags & LLE_PUB)) {
			(void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
			(void)memcpy(ar_sha(ah), lle->ll_addr, ah->ar_hln);
			LLE_RUNLOCK(lle);
		} else {
			if (lle != NULL)
				LLE_RUNLOCK(lle);

			if (!V_arp_proxyall)
				goto drop;

			NET_EPOCH_ASSERT();
			nh = fib4_lookup(ifp->if_fib, itaddr, 0, 0, 0);
			if (nh == NULL)
				goto drop;

			/*
			 * Don't send proxies for nodes on the same interface
			 * as this one came out of, or we'll get into a fight
			 * over who claims what Ether address.
			 */
			if (nh->nh_ifp == ifp)
				goto drop;

			(void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
			(void)memcpy(ar_sha(ah), enaddr, ah->ar_hln);

			/*
			 * Also check that the node which sent the ARP packet
			 * is on the interface we expect it to be on. This
			 * avoids ARP chaos if an interface is connected to the
			 * wrong network.
			 */

			nh = fib4_lookup(ifp->if_fib, isaddr, 0, 0, 0);
			if (nh == NULL)
				goto drop;
			if (nh->nh_ifp != ifp) {
				ARP_LOG(LOG_INFO, "proxy: ignoring request"
				    " from %s via %s\n",
				    inet_ntoa_r(isaddr, addrbuf),
				    ifp->if_xname);
				goto drop;
			}

#ifdef DEBUG_PROXY
			printf("arp: proxying for %s\n",
			    inet_ntoa_r(itaddr, addrbuf));
#endif
		}
	}

	if (itaddr.s_addr == myaddr.s_addr &&
	    IN_LINKLOCAL(ntohl(itaddr.s_addr))) {
		/* RFC 3927 link-local IPv4; always reply by broadcast. */
#ifdef DEBUG_LINKLOCAL
		printf("arp: sending reply for link-local addr %s\n",
		    inet_ntoa_r(itaddr, addrbuf));
#endif
		m->m_flags |= M_BCAST;
		m->m_flags &= ~M_MCAST;
	} else {
		/* default behaviour; never reply by broadcast. */
		m->m_flags &= ~(M_BCAST|M_MCAST);
	}
	(void)memcpy(ar_tpa(ah), ar_spa(ah), ah->ar_pln);
	(void)memcpy(ar_spa(ah), &itaddr, ah->ar_pln);
	ah->ar_op = htons(ARPOP_REPLY);
	ah->ar_pro = htons(ETHERTYPE_IP); /* let's be sure! */
	m->m_len = sizeof(*ah) + (2 * ah->ar_pln) + (2 * ah->ar_hln);
	m->m_pkthdr.len = m->m_len;
	m->m_pkthdr.rcvif = NULL;
	sa.sa_family = AF_ARP;
	sa.sa_len = 2;

	/* Calculate link header for sending frame */
	bzero(&ro, sizeof(ro));
	linkhdrsize = sizeof(linkhdr);
	error = arp_fillheader(ifp, ah, 0, linkhdr, &linkhdrsize);

	/*
	 * arp_fillheader() may fail due to lack of support in the encap
	 * request routine. This is not necessarily an error; AF_ARP
	 * can/should be handled by if_output().
	 */
	if (error != 0 && error != EAFNOSUPPORT) {
		ARP_LOG(LOG_ERR, "Failed to calculate ARP header on %s: %d\n",
		    if_name(ifp), error);
		goto drop;
	}

	ro.ro_prepend = linkhdr;
	ro.ro_plen = linkhdrsize;
	ro.ro_flags = 0;

	m_clrprotoflags(m);	/* Avoid confusing lower layers. */
	(*ifp->if_output)(ifp, m, &sa, &ro);
	ARPSTAT_INC(txreplies);
	return;

drop:
	m_freem(m);
}
#endif

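/*
 * Detach and return the chain of packets queued on @la while it was
 * awaiting resolution.  The caller must hold the lle write lock.
 */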
static struct mbuf *
arp_grab_holdchain(struct llentry *la)
{
	struct mbuf *chain;

	LLE_WLOCK_ASSERT(la);

	chain = la->la_hold;
	la->la_hold = NULL;
	la->la_numheld = 0;

	return (chain);
}

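/*
 * Transmit every packet in @chain (previously detached with
 * arp_grab_holdchain()) on @ifp, using the resolved link-layer header
 * cached in @la.  Called without the lle lock held.
 */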
static void
arp_flush_holdchain(struct ifnet *ifp, struct llentry *la, struct mbuf *chain)
{
	struct mbuf *m_hold, *m_hold_next;
	struct sockaddr_in sin;

	NET_EPOCH_ASSERT();

	struct route ro = {
		.ro_prepend = la->r_linkdata,
		.ro_plen = la->r_hdrlen,
	};

	lltable_fill_sa_entry(la, (struct sockaddr *)&sin);

	for (m_hold = chain; m_hold != NULL; m_hold = m_hold_next) {
		m_hold_next = m_hold->m_nextpkt;
		m_hold->m_nextpkt = NULL;
		/* Avoid confusing lower layers. */
		m_clrprotoflags(m_hold);
		(*ifp->if_output)(ifp, m_hold, (struct sockaddr *)&sin, &ro);
	}
}

/*
 * Checks received arp data against existing @la.
 * Updates lle state/performs notification if necessary.
 */
static void
arp_check_update_lle(struct arphdr *ah, struct in_addr isaddr, struct ifnet *ifp,
    int bridged, struct llentry *la)
{
	uint8_t linkhdr[LLE_MAX_LINKHDR];
	size_t linkhdrsize;
	int lladdr_off;
	char addrbuf[INET_ADDRSTRLEN];

	LLE_WLOCK_ASSERT(la);

	/* the following is not an error when doing bridging */
	if (!bridged && la->lle_tbl->llt_ifp != ifp) {
		if (log_arp_wrong_iface)
			ARP_LOG(LOG_WARNING, "%s is on %s "
			    "but got reply from %*D on %s\n",
			    inet_ntoa_r(isaddr, addrbuf),
			    la->lle_tbl->llt_ifp->if_xname,
			    ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
			    ifp->if_xname);
		LLE_WUNLOCK(la);
		return;
	}
	if ((la->la_flags & LLE_VALID) &&
	    bcmp(ar_sha(ah), la->ll_addr, ifp->if_addrlen)) {
		if (la->la_flags & LLE_STATIC) {
			LLE_WUNLOCK(la);
			if (log_arp_permanent_modify)
				ARP_LOG(LOG_ERR,
				    "%*D attempts to modify "
				    "permanent entry for %s on %s\n",
				    ifp->if_addrlen,
				    (u_char *)ar_sha(ah), ":",
				    inet_ntoa_r(isaddr, addrbuf),
				    ifp->if_xname);
			return;
		}
		if (log_arp_movements) {
			ARP_LOG(LOG_INFO, "%s moved from %*D "
			    "to %*D on %s\n",
			    inet_ntoa_r(isaddr, addrbuf),
			    ifp->if_addrlen,
			    (u_char *)la->ll_addr, ":",
			    ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
			    ifp->if_xname);
		}
	}

	/* Calculate full link prepend to use in lle */
	linkhdrsize = sizeof(linkhdr);
	if (lltable_calc_llheader(ifp, AF_INET, ar_sha(ah), linkhdr,
	    &linkhdrsize, &lladdr_off) != 0) {
		LLE_WUNLOCK(la);
		return;
	}

	/* Check if something has changed */
	if (memcmp(la->r_linkdata, linkhdr, linkhdrsize) != 0 ||
	    (la->la_flags & LLE_VALID) == 0) {
		/* Try to perform LLE update */
		if (lltable_try_set_entry_addr(ifp, la, linkhdr, linkhdrsize,
		    lladdr_off) == 0) {
			LLE_WUNLOCK(la);
			return;
		}

		/* Clear fast path feedback request if set */
		llentry_mark_used(la);
	}

	arp_mark_lle_reachable(la, ifp);

	/*
	 * The packets are all freed within the call to the output
	 * routine.
	 *
	 * NB: The lock MUST be released before the call to the
	 * output routine.
	 */
	if (la->la_hold != NULL) {
		struct mbuf *chain;

		chain = arp_grab_holdchain(la);
		LLE_WUNLOCK(la);
		arp_flush_holdchain(ifp, la, chain);
	} else
		LLE_WUNLOCK(la);
}

static void
arp_mark_lle_reachable(struct llentry *la, struct ifnet *ifp)
{
	int canceled, wtime;

	LLE_WLOCK_ASSERT(la);

	la->ln_state = ARP_LLINFO_REACHABLE;
	EVENTHANDLER_INVOKE(lle_event, la, LLENTRY_RESOLVED);

	if ((ifp->if_flags & IFF_STICKYARP) != 0)
		la->la_flags |= LLE_STATIC;

	if (!(la->la_flags & LLE_STATIC)) {
		LLE_ADDREF(la);
		la->la_expire = time_uptime + V_arpt_keep;
		wtime = V_arpt_keep - V_arp_maxtries * V_arpt_rexmit;
		if (wtime < 0)
			wtime = V_arpt_keep;
		canceled = callout_reset(&la->lle_timer,
		    hz * wtime, arptimer, la);
		if (canceled)
			LLE_REMREF(la);
	}
	la->la_asked = 0;
	la->la_preempt = V_arp_maxtries;
}

/*
 * Add permanent link-layer record for given interface address.
 */
static __noinline void
arp_add_ifa_lle(struct ifnet *ifp, const struct sockaddr *dst)
{
	struct llentry *lle, *lle_tmp;

	/*
	 * Interface address LLE record is considered static
	 * because kernel code relies on LLE_STATIC flag to check
	 * if these entries can be rewritten by arp updates.
	 */
	lle = lltable_alloc_entry(LLTABLE(ifp), LLE_IFADDR | LLE_STATIC, dst);
	if (lle == NULL) {
		log(LOG_INFO, "arp_ifinit: cannot create arp "
		    "entry for interface address\n");
		return;
	}

	IF_AFDATA_WLOCK(ifp);
	LLE_WLOCK(lle);
	/* Unlink any existing entry */
	lle_tmp = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
	if (lle_tmp != NULL)
		lltable_unlink_entry(LLTABLE(ifp), lle_tmp);

	lltable_link_entry(LLTABLE(ifp), lle);
	IF_AFDATA_WUNLOCK(ifp);

	if (lle_tmp != NULL)
		EVENTHANDLER_INVOKE(lle_event, lle_tmp, LLENTRY_EXPIRED);

	EVENTHANDLER_INVOKE(lle_event, lle, LLENTRY_RESOLVED);
	LLE_WUNLOCK(lle);
	if (lle_tmp != NULL)
		lltable_free_entry(LLTABLE(ifp), lle_tmp);
}

/*
 * Handle the garp_rexmit_count. Like sysctl_handle_int(), but limits the range
 * of valid values.
 */
static int
sysctl_garp_rexmit(SYSCTL_HANDLER_ARGS)
{
	int error;
	int rexmit_count = *(int *)arg1;

	error = sysctl_handle_int(oidp, &rexmit_count, 0, req);

	/* Enforce limits on any new value that may have been set. */
	if (!error && req->newptr) {
		/* A new value was set. */
		if (rexmit_count < 0) {
			rexmit_count = 0;
		} else if (rexmit_count > MAX_GARP_RETRANSMITS) {
			rexmit_count = MAX_GARP_RETRANSMITS;
		}
		*(int *)arg1 = rexmit_count;
	}

	return (error);
}

/*
 * Retransmit a Gratuitous ARP (GARP) and, if necessary, schedule a callout to
 * retransmit it again. A pending callout owns a reference to the ifa.
 */
static void
garp_rexmit(void *arg)
{
	struct epoch_tracker et;
	struct in_ifaddr *ia = arg;

	if (callout_pending(&ia->ia_garp_timer) ||
	    !callout_active(&ia->ia_garp_timer)) {
		IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
		ifa_free(&ia->ia_ifa);
		return;
	}

	NET_EPOCH_ENTER(et);
	CURVNET_SET(ia->ia_ifa.ifa_ifp->if_vnet);

	/*
	 * Drop lock while the ARP request is generated.
	 */
	IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);

	arprequest(ia->ia_ifa.ifa_ifp, &IA_SIN(ia)->sin_addr,
	    &IA_SIN(ia)->sin_addr, IF_LLADDR(ia->ia_ifa.ifa_ifp));

	/*
	 * Increment the count of retransmissions. If the count has reached the
	 * maximum value, stop sending the GARP packets. Otherwise, schedule
	 * the callout to retransmit another GARP packet.
	 */
	++ia->ia_garp_count;
	if (ia->ia_garp_count >= V_garp_rexmit_count) {
		ifa_free(&ia->ia_ifa);
	} else {
		int rescheduled;
		IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
		rescheduled = callout_reset(&ia->ia_garp_timer,
		    (1 << ia->ia_garp_count) * hz,
		    garp_rexmit, ia);
		IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
		if (rescheduled) {
			ifa_free(&ia->ia_ifa);
		}
	}

	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
}

/*
 * Start the GARP retransmit timer.
 *
 * A single GARP is always transmitted when an IPv4 address is added
 * to an interface and that is usually sufficient. However, in some
 * circumstances, such as when a shared address is passed between
 * cluster nodes, this single GARP may occasionally be dropped or
 * lost. This can lead to neighbors on the network link working with a
 * stale ARP cache and sending packets destined for that address to
 * the node that previously owned the address, which may not respond.
 *
 * To avoid this situation, GARP retransmits can be enabled by setting
 * the net.link.ether.inet.garp_rexmit_count sysctl to a value greater
 * than zero. The setting represents the maximum number of
 * retransmissions. The interval between retransmissions is calculated
 * using an exponential backoff algorithm, doubling each time, so the
 * retransmission intervals are: {1, 2, 4, 8, 16, ...} (seconds).
 */
static void
garp_timer_start(struct ifaddr *ifa)
{
	struct in_ifaddr *ia = (struct in_ifaddr *) ifa;

	IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
	ia->ia_garp_count = 0;
	if (callout_reset(&ia->ia_garp_timer, (1 << ia->ia_garp_count) * hz,
	    garp_rexmit, ia) == 0) {
		ifa_ref(ifa);
	}
	IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
}
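
/*
 * Resulting schedule, for illustration: with
 * net.link.ether.inet.garp_rexmit_count set to 3, the announcement
 * sent from arp_ifinit() is followed by three retransmissions spaced
 * roughly 1, 2 and 4 seconds apart (garp_timer_start() arms the first
 * callout at 1 * hz and garp_rexmit() doubles the delay on each pass).
 */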

void
arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
{
	struct epoch_tracker et;
	const struct sockaddr_in *dst_in;
	const struct sockaddr *dst;

	if (ifa->ifa_carp != NULL)
		return;

	dst = ifa->ifa_addr;
	dst_in = (const struct sockaddr_in *)dst;

	if (ntohl(dst_in->sin_addr.s_addr) == INADDR_ANY)
		return;
	NET_EPOCH_ENTER(et);
	arp_announce_ifaddr(ifp, dst_in->sin_addr, IF_LLADDR(ifp));
	NET_EPOCH_EXIT(et);
	if (V_garp_rexmit_count > 0) {
		garp_timer_start(ifa);
	}

	arp_add_ifa_lle(ifp, dst);
}

void
arp_announce_ifaddr(struct ifnet *ifp, struct in_addr addr, u_char *enaddr)
{

	if (ntohl(addr.s_addr) != INADDR_ANY)
		arprequest(ifp, &addr, &addr, enaddr);
}

/*
 * Sends gratuitous ARPs for each ifaddr to notify other
 * nodes about the address change.
 */
static __noinline void
arp_handle_ifllchange(struct ifnet *ifp)
{
	struct ifaddr *ifa;

	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
	}
}

/*
 * A handler for interface link layer address change event.
 */
static void
arp_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	/* if_bridge can update its lladdr during if_vmove(), after we've done
	 * if_detach_internal()/dom_ifdetach(). */
	if (ifp->if_afdata[AF_INET] == NULL)
		return;

	lltable_update_ifaddr(LLTABLE(ifp));

	if ((ifp->if_flags & IFF_UP) != 0)
		arp_handle_ifllchange(ifp);
}

static void
vnet_arp_init(void)
{

	if (IS_DEFAULT_VNET(curvnet)) {
		netisr_register(&arp_nh);
		iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
		    arp_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
	}
#ifdef VIMAGE
	else
		netisr_register_vnet(&arp_nh);
#endif
}
VNET_SYSINIT(vnet_arp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND,
    vnet_arp_init, NULL);

#ifdef VIMAGE
/*
 * We have to unregister ARP along with IP otherwise we risk doing INADDR_HASH
 * lookups after destroying the hash. Ideally this would go on SI_ORDER_3.5.
 */
static void
vnet_arp_destroy(__unused void *arg)
{

	netisr_unregister_vnet(&arp_nh);
}
VNET_SYSUNINIT(vnet_arp_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD,
    vnet_arp_destroy, NULL);
#endif