Lines matching full:ring (FreeBSD safexcel(4) crypto driver)

96 safexcel_next_request(struct safexcel_ring *ring) in safexcel_next_request() argument
100 i = ring->cdr.read; in safexcel_next_request()
103 return (&ring->requests[i]); in safexcel_next_request()
107 safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring) in safexcel_cmd_descr_next() argument
111 if (ring->write == ring->read) in safexcel_cmd_descr_next()
113 cdesc = &ring->desc[ring->read]; in safexcel_cmd_descr_next()
114 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE; in safexcel_cmd_descr_next()
119 safexcel_res_descr_next(struct safexcel_res_descr_ring *ring) in safexcel_res_descr_next() argument
123 if (ring->write == ring->read) in safexcel_res_descr_next()
125 rdesc = &ring->desc[ring->read]; in safexcel_res_descr_next()
126 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE; in safexcel_res_descr_next()
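
The two _next() helpers above (lines 107-126) are the consumer side of a
single-reader descriptor ring: the ring is empty when read == write;
otherwise the entry at read is handed out and the index advances modulo the
ring size. A minimal stand-alone sketch of that logic, with illustrative
names (RING_SIZE stands in for SAFEXCEL_RING_SIZE):

#include <stddef.h>

#define RING_SIZE 512                   /* stands in for SAFEXCEL_RING_SIZE */

struct descr_ring {
        int read;                       /* next entry to consume */
        int write;                      /* next entry to fill */
        void *desc[RING_SIZE];
};

static void *
ring_next(struct descr_ring *ring)
{
        void *d;

        if (ring->write == ring->read)
                return (NULL);          /* empty: nothing completed */
        d = ring->desc[ring->read];
        ring->read = (ring->read + 1) % RING_SIZE;
        return (d);
}
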
131 safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring) in safexcel_alloc_request() argument
135 mtx_assert(&ring->mtx, MA_OWNED); in safexcel_alloc_request()
137 i = ring->cdr.write; in safexcel_alloc_request()
138 if ((i + 1) % SAFEXCEL_RING_SIZE == ring->cdr.read) in safexcel_alloc_request()
140 return (&ring->requests[i]); in safexcel_alloc_request()
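
safexcel_alloc_request() (line 131) is the matching producer-side test: one
slot stays in reserve so that (write + 1) % size == read distinguishes a
full ring from an empty one without a separate counter. A sketch reusing
struct descr_ring from the block above; note that, as in the snippet, only
space is checked here -- the driver advances cdr.write later, when
descriptors are actually enqueued:

static void *
ring_reserve(struct descr_ring *ring)
{
        int i;

        i = ring->write;
        if ((i + 1) % RING_SIZE == ring->read)
                return (NULL);          /* full: the reserve slot is next */
        return (&ring->desc[i]);        /* write is bumped at enqueue time */
}
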
144 safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req) in safexcel_free_request() argument
148 mtx_assert(&ring->mtx, MA_OWNED); in safexcel_free_request()
151 bus_dmamap_unload(ring->data_dtag, req->dmap); in safexcel_free_request()
167 struct safexcel_ring *ring; in safexcel_rdr_intr() local
171 ring = &sc->sc_ring[ringidx]; in safexcel_rdr_intr()
179 "zero pending requests on ring %d\n", ringidx); in safexcel_rdr_intr()
180 mtx_lock(&ring->mtx); in safexcel_rdr_intr()
186 ring = &sc->sc_ring[ringidx]; in safexcel_rdr_intr()
187 bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map, in safexcel_rdr_intr()
189 bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map, in safexcel_rdr_intr()
191 bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map, in safexcel_rdr_intr()
196 req = safexcel_next_request(ring); in safexcel_rdr_intr()
200 bus_dmamap_sync(ring->data_dtag, req->dmap, in safexcel_rdr_intr()
204 cdesc = safexcel_cmd_descr_next(&ring->cdr); in safexcel_rdr_intr()
213 rdesc = safexcel_res_descr_next(&ring->rdr); in safexcel_rdr_intr()
230 mtx_lock(&ring->mtx); in safexcel_rdr_intr()
232 KASSERT(ring->queued >= nreqs, in safexcel_rdr_intr()
234 __func__, ring->queued, nreqs)); in safexcel_rdr_intr()
235 ring->queued -= nreqs; in safexcel_rdr_intr()
241 blocked = ring->blocked; in safexcel_rdr_intr()
242 ring->blocked = 0; in safexcel_rdr_intr()
245 if (ring->queued != 0) { in safexcel_rdr_intr()
248 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | imin(ring->queued, 16)); in safexcel_rdr_intr()
250 mtx_unlock(&ring->mtx); in safexcel_rdr_intr()
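
safexcel_rdr_intr() is the completion path: it syncs the ring DMA maps
toward the CPU, pops one shadow request plus its command and result
descriptors per finished packet, then retires the batch under ring->mtx and
re-arms the packet-mode threshold if work remains (lines 245-248). A
bookkeeping-only sketch, reusing ring_next() from above; the assert mirrors
the driver's KASSERT at line 232:

#include <assert.h>

static void
ring_retire(struct descr_ring *cdr, struct descr_ring *rdr, int *queued,
    int ncompleted)
{
        int nreqs;

        for (nreqs = 0; nreqs < ncompleted; nreqs++) {
                (void)ring_next(cdr);   /* consume the command descriptor */
                (void)ring_next(rdr);   /* consume the result descriptor */
        }
        assert(*queued >= nreqs);       /* mirrors the driver's KASSERT */
        *queued -= nreqs;               /* held under ring->mtx in the driver */
}
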
265 int ring; in safexcel_ring_intr() local
270 ring = ih->ring; in safexcel_ring_intr()
273 SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring)); in safexcel_ring_intr()
275 if (status & SAFEXCEL_CDR_IRQ(ring)) { in safexcel_ring_intr()
277 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT); in safexcel_ring_intr()
279 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT, in safexcel_ring_intr()
284 if (status & SAFEXCEL_RDR_IRQ(ring)) { in safexcel_ring_intr()
286 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT); in safexcel_ring_intr()
290 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT, in safexcel_ring_intr()
294 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring), in safexcel_ring_intr()
298 safexcel_rdr_intr(sc, ring); in safexcel_ring_intr()
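
safexcel_ring_intr() reads the ring's interrupt status, writes the asserted
bits back to the xDR_STAT registers to clear them, acks the ring interrupt
controller, and only then reaps the result ring. A sketch of that
write-one-to-clear shape over a fake register file; the IRQ bit layout and
the mmio_* helpers are assumptions, not taken from the driver:

#include <stdint.h>

#define CDR_IRQ(r) (1u << (2 * (r)))            /* bit layout illustrative */
#define RDR_IRQ(r) (1u << (2 * (r) + 1))

static uint32_t regs[64];                       /* stand-in register file */

static uint32_t mmio_read(int reg) { return (regs[reg]); }
static void mmio_write(int reg, uint32_t v) { regs[reg] &= ~v; } /* W1C */

static int
ring_intr(int stat_reg, int ring)
{
        uint32_t status;

        status = mmio_read(stat_reg);
        if (status == 0)
                return (0);                     /* not ours */
        mmio_write(stat_reg, status);           /* ack: write set bits back */
        /* A CDR_IRQ() bit means command-ring done; the driver just acks it. */
        return ((status & RDR_IRQ(ring)) != 0); /* reap results if set */
}
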
314 /* Scan for valid ring interrupt controllers. */ in safexcel_configure()
490 /* Clear interrupts for this ring. */ in safexcel_hw_prepare_rings()
542 /* Ring size. */ in safexcel_hw_prepare_rings()
553 struct safexcel_ring *ring; in safexcel_hw_setup_rings() local
564 ring = &sc->sc_ring[i]; in safexcel_hw_setup_rings()
570 /* Ring base address. */ in safexcel_hw_setup_rings()
573 SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr)); in safexcel_hw_setup_rings()
576 SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr)); in safexcel_hw_setup_rings()
605 /* Ring base address. */ in safexcel_hw_setup_rings()
608 SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr)); in safexcel_hw_setup_rings()
611 SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr)); in safexcel_hw_setup_rings()
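
The base-address writes above split each ring's 64-bit bus address across
32-bit LO/HI registers. The SAFEXCEL_ADDR_LO/HI macros plausibly reduce to
the masking below (an assumption; the driver header is authoritative):

#include <stdint.h>

#define ADDR_LO(a) ((uint32_t)((uint64_t)(a) & 0xffffffffu))
#define ADDR_HI(a) ((uint32_t)((uint64_t)(a) >> 32))
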
637 /* Enable ring interrupt. */ in safexcel_hw_setup_rings()
652 * Result descriptor ring operations. in safexcel_hw_reset_rings()
655 /* Reset ring base address. */ in safexcel_hw_reset_rings()
684 /* Disable ring interrupt. */ in safexcel_hw_reset_rings()
690 * Command descriptor ring operations. in safexcel_hw_reset_rings()
693 /* Reset ring base address. */ in safexcel_hw_reset_rings()
748 safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring, in safexcel_execute() argument
754 mtx_assert(&ring->mtx, MA_OWNED); in safexcel_execute()
757 ring->pending++; in safexcel_execute()
758 ring->pending_cdesc += req->cdescs; in safexcel_execute()
759 ring->pending_rdesc += req->rdescs; in safexcel_execute()
765 busy = ring->queued != 0; in safexcel_execute()
766 ncdesc = ring->pending_cdesc + req->cdescs; in safexcel_execute()
767 nrdesc = ring->pending_rdesc + req->rdescs; in safexcel_execute()
768 ring->queued += ring->pending + 1; in safexcel_execute()
773 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | ring->queued); in safexcel_execute()
782 ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0; in safexcel_execute()
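
safexcel_execute() batches submissions: while the caller hints that more
requests are coming, it only accumulates the pending counters; the final
submission folds the whole batch into queued and, if the ring was idle,
arms the packet-mode threshold once. A sketch of that accounting with the
register kick elided; more_coming stands in for the driver's hint flag
(CRYPTO_HINT_MORE in opencrypto):

struct ring_state {
        int queued;                     /* requests owned by the hardware */
        int pending;                    /* batched, not yet kicked */
        int pending_cdesc;
        int pending_rdesc;
};

static void
ring_execute(struct ring_state *r, int cdescs, int rdescs, int more_coming)
{
        if (more_coming) {
                r->pending++;
                r->pending_cdesc += cdescs;
                r->pending_rdesc += rdescs;
                return;
        }
        r->queued += r->pending + 1;
        /*
         * ... submit pending_cdesc + cdescs command descriptors and
         * pending_rdesc + rdescs result descriptors, then arm the
         * packet-mode threshold with r->queued if the ring was idle ...
         */
        r->pending = r->pending_cdesc = r->pending_rdesc = 0;
}
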
789 struct safexcel_ring *ring; in safexcel_init_rings() local
794 ring = &sc->sc_ring[i]; in safexcel_init_rings()
796 snprintf(ring->lockname, sizeof(ring->lockname), in safexcel_init_rings()
798 mtx_init(&ring->mtx, ring->lockname, NULL, MTX_DEF); in safexcel_init_rings()
800 ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0; in safexcel_init_rings()
801 ring->queued = 0; in safexcel_init_rings()
802 ring->cdr.read = ring->cdr.write = 0; in safexcel_init_rings()
803 ring->rdr.read = ring->rdr.write = 0; in safexcel_init_rings()
805 cdesc = &ring->cdr.desc[j]; in safexcel_init_rings()
806 atok = ring->dma_atok.paddr + in safexcel_init_rings()
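
The init loop above pre-binds each command descriptor slot to a
fixed-stride record inside the shared additional-token DMA buffer, so token
addresses are computed once at setup rather than per request. The stride
arithmetic likely resembles the sketch below; ATOK_STRIDE is a made-up
value, the driver derives the real offset from its configuration:

#include <stdint.h>

#define ATOK_STRIDE 128                 /* per-slot token bytes; illustrative */

static uint64_t
atok_paddr(uint64_t atok_base, int slot)
{
        return (atok_base + (uint64_t)slot * ATOK_STRIDE);
}
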
890 struct safexcel_ring *ring; in safexcel_dma_free_rings() local
894 ring = &sc->sc_ring[i]; in safexcel_dma_free_rings()
895 safexcel_dma_free_mem(&ring->cdr.dma); in safexcel_dma_free_rings()
896 safexcel_dma_free_mem(&ring->dma_atok); in safexcel_dma_free_rings()
897 safexcel_dma_free_mem(&ring->rdr.dma); in safexcel_dma_free_rings()
898 bus_dma_tag_destroy(ring->data_dtag); in safexcel_dma_free_rings()
899 mtx_destroy(&ring->mtx); in safexcel_dma_free_rings()
906 struct safexcel_ring *ring; in safexcel_dma_init() local
911 ring = &sc->sc_ring[i]; in safexcel_dma_init()
924 &ring->data_dtag); /* dmat */ in safexcel_dma_init()
933 error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size); in safexcel_dma_init()
940 ring->cdr.desc = in safexcel_dma_init()
941 (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr; in safexcel_dma_init()
946 error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size); in safexcel_dma_init()
956 error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size); in safexcel_dma_init()
963 ring->rdr.desc = in safexcel_dma_init()
964 (struct safexcel_res_descr *)ring->rdr.dma.vaddr; in safexcel_dma_init()
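
safexcel_dma_init() allocates one contiguous DMA region per ring structure
and exposes it as a descriptor array via the vaddr casts at lines 940-941
and 963-964. A userland stand-in for that allocate-and-cast step; the
kernel path goes through bus_dma(9) via safexcel_dma_alloc_mem(), and the
descriptor layout here is illustrative:

#include <stdint.h>
#include <stdlib.h>

#define RING_SLOTS 512                  /* stands in for SAFEXCEL_RING_SIZE */

struct cmd_descr { uint32_t words[16]; };       /* layout illustrative */

static struct cmd_descr *
alloc_cmd_ring(void)
{
        /* one descriptor per ring slot, zeroed like fresh DMA memory */
        return (calloc(RING_SLOTS, sizeof(struct cmd_descr)));
}
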
1036 sc->sc_ih[i].ring = i; in safexcel_setup_dev_interrupts()
1049 "failed to bind ring %d\n", error); in safexcel_setup_dev_interrupts()
1092 (void)snprintf(name, sizeof(name), "ring%d", i); in safexcel_alloc_dev_resources()
1161 struct safexcel_ring *ring; in safexcel_attach() local
1178 ring = &sc->sc_ring[ringidx]; in safexcel_attach()
1180 ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK); in safexcel_attach()
1181 ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK); in safexcel_attach()
1184 req = &ring->requests[i]; in safexcel_attach()
1187 if (bus_dmamap_create(ring->data_dtag, in safexcel_attach()
1190 bus_dmamap_destroy(ring->data_dtag, in safexcel_attach()
1191 ring->requests[j].dmap); in safexcel_attach()
1197 bus_dmamap_destroy(ring->data_dtag, in safexcel_attach()
1198 ring->requests[j].dmap); in safexcel_attach()
1200 &ring->requests[j].ctx); in safexcel_attach()
1225 "Number of command descriptor ring overflows"); in safexcel_attach()
1229 "Number of result descriptor ring overflows"); in safexcel_attach()
1249 struct safexcel_ring *ring; in safexcel_detach() local
1263 ring = &sc->sc_ring[ringidx]; in safexcel_detach()
1265 bus_dmamap_destroy(ring->data_dtag, in safexcel_detach()
1266 ring->requests[i].dmap); in safexcel_detach()
1267 safexcel_dma_free_mem(&ring->requests[i].ctx); in safexcel_detach()
1269 sglist_free(ring->cmd_data); in safexcel_detach()
1270 sglist_free(ring->res_data); in safexcel_detach()
1980 safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last, in safexcel_res_descr_add() argument
1986 mtx_assert(&ring->mtx, MA_OWNED); in safexcel_res_descr_add()
1988 rring = &ring->rdr; in safexcel_res_descr_add()
2016 safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last, in safexcel_cmd_descr_add() argument
2024 mtx_assert(&ring->mtx, MA_OWNED); in safexcel_cmd_descr_add()
2026 cring = &ring->cdr; in safexcel_cmd_descr_add()
2056 safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count) in safexcel_cmd_descr_rollback() argument
2060 mtx_assert(&ring->mtx, MA_OWNED); in safexcel_cmd_descr_rollback()
2062 cring = &ring->cdr; in safexcel_cmd_descr_rollback()
2069 safexcel_res_descr_rollback(struct safexcel_ring *ring, int count) in safexcel_res_descr_rollback() argument
2073 mtx_assert(&ring->mtx, MA_OWNED); in safexcel_res_descr_rollback()
2075 rring = &ring->rdr; in safexcel_res_descr_rollback()
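
The rollback helpers retract descriptors that were added before a later
step failed, by stepping the write index back with wraparound. A sketch of
the index arithmetic over struct descr_ring, assuming count never exceeds
the ring size:

static void
ring_rollback(struct descr_ring *ring, int count)
{
        ring->write -= count;
        if (ring->write < 0)
                ring->write += RING_SIZE;
}
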
2114 struct safexcel_ring *ring; in safexcel_create_chain_cb() local
2130 ring = &req->sc->sc_ring[req->ringidx]; in safexcel_create_chain_cb()
2132 mtx_assert(&ring->mtx, MA_OWNED); in safexcel_create_chain_cb()
2145 sglist_reset(ring->cmd_data); in safexcel_create_chain_cb()
2146 sglist_reset(ring->res_data); in safexcel_create_chain_cb()
2148 safexcel_append_segs(segs, nseg, ring->cmd_data, in safexcel_create_chain_cb()
2151 safexcel_append_segs(segs, nseg, ring->cmd_data, in safexcel_create_chain_cb()
2154 safexcel_append_segs(segs, nseg, ring->res_data, in safexcel_create_chain_cb()
2159 safexcel_append_segs(segs, nseg, ring->cmd_data, in safexcel_create_chain_cb()
2162 safexcel_append_segs(segs, nseg, ring->res_data, in safexcel_create_chain_cb()
2167 sg = ring->cmd_data; in safexcel_create_chain_cb()
2174 * descriptor. Also, we must allocate at least one command ring in safexcel_create_chain_cb()
2175 * entry per request to keep the request shadow ring in sync. in safexcel_create_chain_cb()
2185 cdesc = safexcel_cmd_descr_add(ring, first, last, in safexcel_create_chain_cb()
2189 safexcel_cmd_descr_rollback(ring, i); in safexcel_create_chain_cb()
2199 sg = ring->res_data; in safexcel_create_chain_cb()
2213 if (safexcel_res_descr_add(ring, first, last, in safexcel_create_chain_cb()
2215 safexcel_cmd_descr_rollback(ring, in safexcel_create_chain_cb()
2216 ring->cmd_data->sg_nseg); in safexcel_create_chain_cb()
2217 safexcel_res_descr_rollback(ring, i); in safexcel_create_chain_cb()
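
safexcel_create_chain_cb() builds the command chain first and the result
chain second; if a result descriptor cannot be added, every command
descriptor is rolled back too (lines 2215-2217), so a failed request leaves
both rings untouched. A sketch of that two-phase build with unwind, reusing
ring_rollback() from above plus a hypothetical ring_add():

static void *
ring_add(struct descr_ring *ring)
{
        int i;

        i = ring->write;
        if ((i + 1) % RING_SIZE == ring->read)
                return (NULL);
        ring->write = (i + 1) % RING_SIZE;
        return (&ring->desc[i]);
}

static int
build_chain(struct descr_ring *cdr, struct descr_ring *rdr, int ncdescs,
    int nrdescs)
{
        int i;

        for (i = 0; i < ncdescs; i++) {
                if (ring_add(cdr) == NULL) {
                        ring_rollback(cdr, i);  /* undo partial command chain */
                        return (-1);
                }
        }
        for (i = 0; i < nrdescs; i++) {
                if (ring_add(rdr) == NULL) {
                        ring_rollback(cdr, ncdescs);    /* all commands */
                        ring_rollback(rdr, i);          /* partial results */
                        return (-1);
                }
        }
        return (0);
}
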
2227 safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req) in safexcel_create_chain() argument
2234 error = bus_dmamap_load_crp(ring->data_dtag, req->dmap, req->crp, in safexcel_create_chain()
2523 struct safexcel_ring *ring; in safexcel_process() local
2538 ring = &sc->sc_ring[curcpu % sc->sc_config.rings]; in safexcel_process()
2539 mtx_lock(&ring->mtx); in safexcel_process()
2540 req = safexcel_alloc_request(sc, ring); in safexcel_process()
2542 ring->blocked = CRYPTO_SYMQ; in safexcel_process()
2543 mtx_unlock(&ring->mtx); in safexcel_process()
2553 error = safexcel_create_chain(ring, req); in safexcel_process()
2555 safexcel_free_request(ring, req); in safexcel_process()
2557 ring->blocked = CRYPTO_SYMQ; in safexcel_process()
2558 mtx_unlock(&ring->mtx); in safexcel_process()
2570 bus_dmamap_sync(ring->data_dtag, req->dmap, in safexcel_process()
2574 bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map, in safexcel_process()
2576 bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map, in safexcel_process()
2578 bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map, in safexcel_process()
2581 safexcel_execute(sc, ring, req, hint); in safexcel_process()
2583 mtx_unlock(&ring->mtx); in safexcel_process()
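
safexcel_process() spreads load by picking a ring from curcpu, then runs
the whole submission under that ring's mutex. When the ring is full it sets
ring->blocked = CRYPTO_SYMQ and fails with ERESTART so the opencrypto layer
can retry; the completion interrupt clears blocked (lines 241-242) and lets
traffic resume. A backpressure sketch reusing ring_reserve() from above,
with the locking left as comments:

#include <errno.h>

static int
process(struct descr_ring *ring, int *blocked)
{
        /* mtx_lock(&ring->mtx) in the driver */
        if (ring_reserve(ring) == NULL) {
                *blocked = 1;           /* CRYPTO_SYMQ in the driver */
                /* mtx_unlock(&ring->mtx) */
                return (ERESTART);      /* caller requeues the request */
        }
        /* ... build the chain, sync the DMA maps, ring_execute() ... */
        /* mtx_unlock(&ring->mtx) */
        return (0);
}
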