Lines Matching +full:lock +full:state
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
39 #include <sys/lock.h>
67 if (atomic_fetchadd_int(&e->refcnt, 1) == 0) /* 0 -> 1 transition */
68 atomic_subtract_int(&d->nfree, 1);
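These two lines are the hold path: taking the first reference on an entry (the 0 -> 1 transition) is the moment it stops being free, so the table-wide free counter is decremented exactly once. A minimal sketch of the matching release path, assuming the same refcnt/nfree fields; the function name is illustrative, not from this file:

static void
l2t_rele_sketch(struct l2t_data *d, struct l2t_entry *e)
{
	/* Last reference dropped (1 -> 0): the entry is reusable again. */
	if (atomic_fetchadd_int(&e->refcnt, -1) == 1)
		atomic_add_int(&d->nfree, 1);
}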
74 u_int hash, half = d->l2t_size / 2, start = 0;
78 KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
80 sa->sa_family));
82 if (sa->sa_family == AF_INET) {
85 key = &sin->sin_addr;
86 len = sizeof(sin->sin_addr);
90 key = &sin6->sin6_addr;
91 len = sizeof(sin6->sin6_addr);
106 KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
108 sa->sa_family));
110 if (sa->sa_family == AF_INET) {
113 return (e->addr[0] != sin->sin_addr.s_addr);
117 return (memcmp(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr)));
125 KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
127 sa->sa_family));
129 if (sa->sa_family == AF_INET) {
132 e->addr[0] = sin->sin_addr.s_addr;
133 e->ipv6 = 0;
137 memcpy(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr));
138 e->ipv6 = 1;
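The three helpers above (hash, compare, store) only line up against one particular layout of the entry's address storage. The layout below is an inference from the fragments, not copied from the header: l2_cmp() compares a single word for IPv4, and l2_store() copies sizeof(e->addr) bytes for IPv6, so addr must be four 32-bit words:

	/* Assumed fields of struct l2t_entry backing the fragments above. */
	uint32_t addr[4];	/* next hop: v4 in addr[0], v6 in addr[0..3] */
	uint8_t ipv6;		/* nonzero iff addr holds an IPv6 address */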
144 * Must be called with the entry's lock held.
149 mtx_assert(&e->lock, MA_OWNED);
151 STAILQ_INSERT_TAIL(&e->wr_list, wr, link);
159 mtx_assert(&e->lock, MA_OWNED);
161 while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
162 STAILQ_REMOVE_HEAD(&e->wr_list, link);
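Together, arpq_enqueue() and this drain loop implement a holding pattern: work requests that arrive while the entry is unresolved are parked on e->wr_list under e->lock, then flushed in FIFO order once the entry becomes usable. The loop body is cut off by the search output; its assumed shape, using the driver's t4_wrq_tx() transmit helper:

	/* Assumed body of the drain loop shown at lines 161-162. */
	while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&e->wr_list, link);
		t4_wrq_tx(sc, wr);	/* hand the parked WR to hardware */
	}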
170 struct tom_data *td = sc->tom_softc;
172 mtx_assert(&e->lock, MA_OWNED);
174 mtx_lock(&td->unsent_wr_lock);
175 STAILQ_CONCAT(&td->unsent_wr_list, &e->wr_list);
176 mtx_unlock(&td->unsent_wr_lock);
178 taskqueue_enqueue(taskqueue_thread, &td->reclaim_wr_resources);
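This is the failure path: rather than freeing parked work requests while holding the entry's lock, they are spliced onto an adapter-wide unsent_wr_list under its own mutex, and a taskqueue task reclaims them later in a sleepable context. A sketch of what such a handler could look like; the handler body and the free_wrqe() cleanup are assumptions, only the list and lock names come from the fragment:

static void
reclaim_wr_resources_sketch(void *arg, int pending)
{
	struct tom_data *td = arg;
	struct wrqe *wr;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);

	/* Detach the whole list under the lock, then work unlocked. */
	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);
		free_wrqe(wr);	/* assumed; the real task may do more */
	}
}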
186 mtx_assert(&e->lock, MA_OWNED);
189 * The entry may be in active use (e->refcnt > 0) or not. We update
195 (e->state == L2T_STATE_RESOLVING || e->state == L2T_STATE_FAILED)) {
201 e->state = L2T_STATE_FAILED;
207 /* Valid or already-stale entry was deleted (or expired) */
209 KASSERT(e->state == L2T_STATE_VALID ||
210 e->state == L2T_STATE_STALE,
211 ("%s: lladdr NULL, state %d", __func__, e->state));
213 e->state = L2T_STATE_STALE;
215 } else if (e->state == L2T_STATE_RESOLVING ||
216 e->state == L2T_STATE_FAILED ||
217 memcmp(e->dmac, lladdr, ETHER_ADDR_LEN)) {
219 /* unresolved -> resolved; or dmac changed */
221 memcpy(e->dmac, lladdr, ETHER_ADDR_LEN);
222 e->vlan = vtag;
224 e->state = L2T_STATE_VALID;
226 e->state = L2T_STATE_VALID;
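Distilled, the update logic above is a three-way decision on the neighbor's new link-layer address. A compact restatement as a sketch; t4_write_l2e() as the hardware write-back step is an assumption from the surrounding driver, not visible in this excerpt:

	if (lladdr == NULL) {
		/* Neighbor deleted or expired: demote to STALE so the
		 * next send revalidates before trusting the cached dmac. */
		e->state = L2T_STATE_STALE;
	} else if (e->state == L2T_STATE_RESOLVING ||
	    e->state == L2T_STATE_FAILED ||
	    memcmp(e->dmac, lladdr, ETHER_ADDR_LEN) != 0) {
		/* First resolution, recovery, or a MAC change: record the
		 * new dmac/vtag and push the entry to the hardware. */
		memcpy(e->dmac, lladdr, ETHER_ADDR_LEN);
		e->vlan = vtag;
		t4_write_l2e(e, 1);	/* assumed; entry goes VALID after */
	} else
		e->state = L2T_STATE_VALID;	/* address unchanged */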
232 struct tom_data *td = sc->tom_softc;
233 struct toedev *tod = &td->tod;
241 if (e->ipv6 == 0) {
244 sin.sin_addr.s_addr = e->addr[0];
249 memcpy(&sin6.sin6_addr, &e->addr[0], sizeof(e->addr));
254 rc = toe_l2_resolve(tod, e->ifp, sa, dmac, &vtag);
258 mtx_lock(&e->lock);
260 mtx_unlock(&e->lock);
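resolve_entry() rebuilds a sockaddr from the entry's stored address and defers to toecore's generic resolver. How the return value feeds back is mostly cut off; the mapping below is an assumption based on toe_l2_resolve()'s usual contract (0 means dmac/vtag were filled in, EWOULDBLOCK means an ARP/ND query is in flight):

	/* Assumed handling of rc from toe_l2_resolve(): */
	if (rc == EWOULDBLOCK)
		return (rc);	/* query sent; the answer re-drives us */
	if (rc != 0)
		return (rc);	/* unreachable next hop; entry will FAIL */
	/* rc == 0: dmac/vtag hold the answer; it is stored under e->lock
	 * in the locked section at lines 258-260 above. */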
270 switch (e->state) {
277 case L2T_STATE_VALID: /* fast-path, send the packet on */
285 mtx_lock(&e->lock);
286 if (e->state != L2T_STATE_SYNC_WRITE &&
287 e->state != L2T_STATE_RESOLVING) {
288 /* state changed by the time we got here */
289 mtx_unlock(&e->lock);
296 mtx_unlock(&e->lock);
301 mtx_lock(&e->lock);
302 if (e->state == L2T_STATE_VALID && !STAILQ_EMPTY(&e->wr_list))
304 if (e->state == L2T_STATE_FAILED)
306 mtx_unlock(&e->lock);
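The switch above is the slow path. The expected common case is a one-branch fast path in an inline caller; a sketch of that shape, with the wrapper's name and signature assumed from the driver's _slow suffix convention:

static inline int
t4_l2t_send(struct adapter *sc, struct wrqe *wr, struct l2t_entry *e)
{
	if (__predict_true(e->state == L2T_STATE_VALID)) {
		t4_wrq_tx(sc, wr);	/* entry resolved: send right away */
		return (0);
	}
	return (t4_l2t_send_slow(sc, wr, e));
}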
320 struct adapter *sc = iq->adapter;
325 MPASS(iq->abs_id == G_TID_QID(GET_TID(rpl)));
327 if (__predict_false(hwidx < sc->vres.l2t.start) ||
328 __predict_false(hwidx >= sc->vres.l2t.start + sc->vres.l2t.size) ||
329 __predict_false(rpl->status != CPL_ERR_NONE)) {
331 __func__, hwidx, rpl->status, sync, sc->vres.l2t.start,
332 sc->vres.l2t.size);
337 const u_int idx = hwidx - sc->vres.l2t.start;
338 struct l2t_entry *e = &sc->l2t->l2tab[idx];
340 mtx_lock(&e->lock);
341 if (e->state != L2T_STATE_SWITCHING) {
343 e->state = L2T_STATE_VALID;
345 mtx_unlock(&e->lock);
353 * the specified port. Produce such an entry - create one if needed.
355 * Note that the ifnet could be a pseudo-device like if_vlan, if_lagg, etc. on
362 struct adapter *sc = pi->adapter;
363 struct l2t_data *d = sc->l2t;
367 KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
369 sa->sa_family));
383 rw_wlock(&d->lock);
384 if (__predict_false(d->l2t_stopped)) {
388 for (e = d->l2tab[hash].first; e; e = e->next) {
389 if (l2_cmp(sa, e) == 0 && e->ifp == ifp && e->vlan == vtag) {
398 mtx_lock(&e->lock); /* avoid race with t4_l2t_free */
399 e->next = d->l2tab[hash].first;
400 d->l2tab[hash].first = e;
402 e->state = L2T_STATE_RESOLVING;
404 e->ifp = ifp;
405 e->hash = hash;
406 e->hw_port = pi->hw_port;
407 e->wrq = &sc->sge.ctrlq[pi->port_id];
408 e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
409 atomic_store_rel_int(&e->refcnt, 1);
410 e->vlan = vtag;
411 mtx_unlock(&e->lock);
414 rw_wunlock(&d->lock);
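A usage sketch for the lookup/allocate path above: a hit bumps the refcount (the atomic_fetchadd_int() at line 67), and a fresh allocation starts at refcnt 1 (line 409), so every successful call must be paired with a release. The t4_l2t_release() pairing shown here is assumed from the refcnt discipline, not from this excerpt:

	struct l2t_entry *e;

	e = t4_l2t_get(pi, ifp, sa);	/* sa: next hop, AF_INET/AF_INET6 */
	if (e == NULL)
		return (ENOMEM);	/* table full, or l2t_stopped set */
	/* ... embed e's index in the connection's hardware state ... */
	t4_l2t_release(e);	/* assumed release; 1 -> 0 frees the slot */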
426 struct adapter *sc = tod->tod_softc;
428 struct l2t_data *d = sc->l2t;
434 rw_rlock(&d->lock);
435 if (__predict_false(d->l2t_stopped))
437 for (e = d->l2tab[hash].first; e; e = e->next) {
438 if (l2_cmp(sa, e) == 0 && e->ifp == ifp) {
439 mtx_lock(&e->lock);
440 if (atomic_load_acq_int(&e->refcnt))
442 if (e->state == L2T_STATE_VALID)
443 e->state = L2T_STATE_STALE;
444 mtx_unlock(&e->lock);
449 rw_runlock(&d->lock);
459 rw_runlock(&d->lock);
461 KASSERT(e->state != L2T_STATE_UNUSED,
465 mtx_unlock(&e->lock);