Lines Matching +full:lock +full:- +full:offset
License header (BSD-2-Clause):
    1   /*-
    2    * SPDX-License-Identifier: BSD-2-Clause
    4    * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
   16    *  - Redistributions of source code must retain the above
   20    *  - Redistributions in binary form must reproduce the above
print_tpte():
   50       G_FW_RI_TPTE_VALID(ntohl(tpte->valid_to_pdid)),
   51       G_FW_RI_TPTE_STAGKEY(ntohl(tpte->valid_to_pdid)),
   52       G_FW_RI_TPTE_STAGSTATE(ntohl(tpte->valid_to_pdid)),
   53       G_FW_RI_TPTE_PDID(ntohl(tpte->valid_to_pdid)),
   54       G_FW_RI_TPTE_PERM(ntohl(tpte->locread_to_qpid)),
   55       G_FW_RI_TPTE_PS(ntohl(tpte->locread_to_qpid)),
   56       (long long)(((u64)ntohl(tpte->len_hi) << 32) | ntohl(tpte->len_lo)),
   57       (long long)(((u64)ntohl(tpte->va_hi) << 32) | ntohl(tpte->va_lo_fbo)));
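The last two arguments show how 64-bit TPTE fields are reassembled: length and VA are stored as big-endian 32-bit halves, so each half is byte-swapped with ntohl() and the high half is shifted into place. A minimal userspace sketch of the same splice, assuming a hypothetical struct tpte_halves in place of the real fw_ri_tpte layout:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* ntohl(), htonl() */

/* Hypothetical stand-in for the hardware layout: one 64-bit field
 * stored as two big-endian 32-bit halves. */
struct tpte_halves {
        uint32_t len_hi;        /* upper 32 bits, big-endian */
        uint32_t len_lo;        /* lower 32 bits, big-endian */
};

/* Convert each half to host order, then splice, exactly as the
 * driver does for tpte->len_hi/len_lo. */
static uint64_t
tpte_len(const struct tpte_halves *t)
{
        return (((uint64_t)ntohl(t->len_hi) << 32) | ntohl(t->len_lo));
}

int
main(void)
{
        struct tpte_halves t = { htonl(0x1), htonl(0x2000) };

        printf("len = 0x%llx\n", (unsigned long long)tpte_len(&t));
        return (0);
}

The (u64) cast before the shift is load-bearing: shifting a 32-bit value left by 32 bits is undefined behavior in C.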
t4_dump_stag():
   67   const u32 offset = sc->vres.stag.start + ((stag >> 8) * 32);
   69   if (offset > sc->vres.stag.start + sc->vres.stag.size - 32) {
   74   read_via_memwin(sc, 0, offset, (u32 *)&tpte, 32);
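t4_dump_stag() maps a STAG to an adapter-memory offset: the low 8 bits of the STAG are the key, the remaining bits index a 32-byte TPTE within the stag region, and the result is bounds-checked against the last whole entry before the memory-window read. A small sketch of that arithmetic, assuming a hypothetical stag_region in place of sc->vres.stag:

#include <stdint.h>
#include <stdio.h>

#define TPTE_SIZE       32      /* bytes per entry, per the driver's arithmetic */

/* Hypothetical stand-in for sc->vres.stag: base and size of the
 * STAG region in adapter memory. */
struct stag_region {
        uint32_t start;
        uint32_t size;
};

/* Index = stag >> 8 (the low byte is the key); each entry is 32
 * bytes; reject offsets past the last whole entry. */
static int64_t
stag_to_offset(const struct stag_region *r, uint32_t stag)
{
        uint32_t offset = r->start + ((stag >> 8) * TPTE_SIZE);

        if (offset > r->start + r->size - TPTE_SIZE)
                return (-1);
        return (offset);
}

int
main(void)
{
        struct stag_region r = { 0x10000, 0x8000 };

        printf("offset = 0x%llx\n",
            (unsigned long long)stag_to_offset(&r, 0x4200));    /* 0x10840 */
        return (0);
}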
t4_dump_all_stag():
   81   const u32 first = sc->vres.stag.start;
   82   const u32 last = first + sc->vres.stag.size - 32;
   83   u32 offset, i;
   85   for (i = 0, offset = first; offset <= last; i++, offset += 32) {
   87           read_via_memwin(sc, 0, offset, (u32 *)&tpte, 4);
   89           read_via_memwin(sc, 0, offset, (u32 *)&tpte, 32);
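The two reads implement a probe-then-fetch scan: the 4-byte read pulls in just the first word, valid_to_pdid, which (as print_tpte() shows) holds the TPTE valid bit, so dead entries can be skipped before paying for the full 32-byte read. A userspace sketch of that pattern, with a hypothetical read_entry() standing in for read_via_memwin():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE      32

/* Hypothetical backing store standing in for adapter memory. */
static uint8_t mem[4 * ENTRY_SIZE];

/* Hypothetical read_entry(): copy len bytes at offset into dst,
 * the role read_via_memwin() plays in the driver. */
static void
read_entry(uint32_t offset, void *dst, size_t len)
{
        memcpy(dst, &mem[offset], len);
}

int
main(void)
{
        const uint32_t first = 0, last = sizeof(mem) - ENTRY_SIZE;
        uint8_t entry[ENTRY_SIZE];
        uint32_t offset, i, word;

        mem[2 * ENTRY_SIZE] = 1;        /* mark entry 2 live for the demo */

        /* Same shape as the driver loop: cheap probe of the first
         * word, full read only for live entries. */
        for (i = 0, offset = first; offset <= last; i++, offset += ENTRY_SIZE) {
                word = 0;
                read_entry(offset, &word, 4);
                if (word == 0)
                        continue;
                read_entry(offset, entry, ENTRY_SIZE);
                printf("entry %u is live\n", i);
        }
        return (0);
}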
dump_err_cqe():
   97   struct adapter *sc = dev->rdev.adap;
post_qp_event():
  127   if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
  128       (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
  129           CTR4(KTR_IW_CXGBE, "%s AE received after RTS - "
  131               qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
  137   if (qhp->attr.state == C4IW_QP_STATE_RTS) {
  139           c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
  144   event.device = chp->ibcq.device;
  146   event.element.cq = &chp->ibcq;
  148   event.element.qp = &qhp->ibqp;
  149   if (qhp->ibqp.event_handler)
  150           (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
  152   spin_lock_irqsave(&chp->comp_handler_lock, flag);
  153   (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
  154   spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
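Both upcalls at the end of post_qp_event() are guarded: the QP's event_handler pointer is checked before it is called, and the CQ's comp_handler is only invoked with comp_handler_lock held, which lets teardown paths take the same lock to fence out in-flight notifications. A userspace sketch of that locked-upcall pattern, with pthread spinlocks in place of spin_lock_irqsave() and all names hypothetical:

#include <pthread.h>
#include <stdio.h>

struct demo_cq {
        pthread_spinlock_t comp_handler_lock;
        void (*comp_handler)(struct demo_cq *, void *);
        void *cq_context;
};

static void
comp_upcall(struct demo_cq *cq, void *ctx)
{
        printf("completion upcall on %p, ctx=%p\n", (void *)cq, ctx);
}

/* The handler pointer is only ever dereferenced under the lock, so
 * a destroyer that holds the lock knows no upcall is in flight. */
static void
notify_cq(struct demo_cq *cq)
{
        pthread_spin_lock(&cq->comp_handler_lock);
        (*cq->comp_handler)(cq, cq->cq_context);
        pthread_spin_unlock(&cq->comp_handler_lock);
}

int
main(void)
{
        struct demo_cq cq = { .comp_handler = comp_upcall };

        pthread_spin_init(&cq.comp_handler_lock, PTHREAD_PROCESS_PRIVATE);
        notify_cq(&cq);
        pthread_spin_destroy(&cq.comp_handler_lock);
        return (0);
}

Build with -lpthread; a real kernel path would additionally need the interrupt masking that spin_lock_irqsave() provides.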
c4iw_ev_dispatch():
  163   spin_lock_irq(&dev->lock);
  172   spin_unlock_irq(&dev->lock);
  177   cqid = qhp->attr.scq;
  179   cqid = qhp->attr.rcq;
  188   spin_unlock_irq(&dev->lock);
  192   c4iw_qp_add_ref(&qhp->ibqp);
  193   atomic_inc(&chp->refcnt);
  194   spin_unlock_irq(&dev->lock);
  250       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
  255   if (atomic_dec_and_test(&chp->refcnt))
  256           wake_up(&chp->wait);
  257   c4iw_qp_rem_ref(&qhp->ibqp);
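Before dropping dev->lock, c4iw_ev_dispatch() pins both objects, c4iw_qp_add_ref() for the QP and atomic_inc(&chp->refcnt) for the CQ, then releases them after the event is delivered; the atomic_dec_and_test()/wake_up() pair lets a destroyer sleep until the last reference drops. A C11 sketch of that get/put protocol, where wake_waiters() is a hypothetical stand-in for wake_up(&chp->wait):

#include <stdatomic.h>
#include <stdio.h>

struct demo_obj {
        atomic_int refcnt;
};

static void
wake_waiters(struct demo_obj *o)
{
        printf("last reference to %p dropped; waking destroyer\n", (void *)o);
}

static void
obj_get(struct demo_obj *o)
{
        atomic_fetch_add(&o->refcnt, 1);        /* done under dev->lock in the driver */
}

/* Equivalent of atomic_dec_and_test(): the fetch returns the prior
 * value, so seeing 1 means this put dropped the count to zero. */
static void
obj_put(struct demo_obj *o)
{
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                wake_waiters(o);
}

int
main(void)
{
        struct demo_obj o = { .refcnt = 1 };    /* creator's reference */

        obj_get(&o);    /* event path pins the object */
        obj_put(&o);    /* event delivered */
        obj_put(&o);    /* destroyer drops the last reference */
        return (0);
}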
c4iw_ev_handler():
  264   struct c4iw_dev *dev = iq->adapter->iwarp_softc;
  265   u32 qid = be32_to_cpu(rc->pldbuflen_qid);
  269   spin_lock_irqsave(&dev->lock, flag);
  272   atomic_inc(&chp->refcnt);
  273   spin_unlock_irqrestore(&dev->lock, flag);
  275   spin_lock_irqsave(&chp->comp_handler_lock, flag);
  276   (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
  277   spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
  278   if (atomic_dec_and_test(&chp->refcnt))
  279           wake_up(&chp->wait);
  282   spin_unlock_irqrestore(&dev->lock, flag);