/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2023 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "common/common.h"

/*
 * Support routines to manage TPT entries used for both RDMA and NVMe
 * offloads.  This includes allocating STAG indices and managing the
 * PBL pool.
 */

#define T4_ULPTX_MIN_IO 32
#define T4_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024

/* PBL and STAG Memory Managers. */

#define MIN_PBL_SHIFT 5			/* 32B == min PBL size (4 entries) */

uint32_t
t4_pblpool_alloc(struct adapter *sc, int size)
{
	vmem_addr_t addr;

	/* Round the request up to the minimum PBL size. */
	if (vmem_xalloc(sc->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
	    4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_FIRSTFIT | M_NOWAIT,
	    &addr) != 0)
		return (0);
#ifdef VERBOSE_TRACES
	CTR(KTR_CXGBE, "%s: addr 0x%lx size %d", __func__, addr, size);
#endif
	return (addr);
}

void
t4_pblpool_free(struct adapter *sc, uint32_t addr, int size)
{
#ifdef VERBOSE_TRACES
	CTR(KTR_CXGBE, "%s: addr 0x%x size %d", __func__, addr, size);
#endif
	vmem_xfree(sc->pbl_arena, addr, roundup(size, (1 << MIN_PBL_SHIFT)));
}

uint32_t
t4_stag_alloc(struct adapter *sc, int size)
{
	vmem_addr_t stag_idx;

	if (vmem_alloc(sc->stag_arena, size, M_FIRSTFIT | M_NOWAIT,
	    &stag_idx) != 0)
		return (T4_STAG_UNSET);
#ifdef VERBOSE_TRACES
	CTR(KTR_CXGBE, "%s: idx 0x%lx size %d", __func__, stag_idx, size);
#endif
	return (stag_idx);
}

void
t4_stag_free(struct adapter *sc, uint32_t stag_idx, int size)
{
#ifdef VERBOSE_TRACES
	CTR(KTR_CXGBE, "%s: idx 0x%x size %d", __func__, stag_idx, size);
#endif
	vmem_free(sc->stag_arena, stag_idx, size);
}
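
/*
 * A minimal, illustrative sketch (compiled out) of how a consumer might
 * pair these allocators: reserve PBL space for a page list plus a STAG
 * index for its TPT entry, and release both on teardown.  The 8-byte
 * PBL entry size follows from the MIN_PBL_SHIFT comment above (32B ==
 * 4 entries).  T4_TPT_EXAMPLE and this function are hypothetical and
 * exist only for illustration.
 */
#ifdef T4_TPT_EXAMPLE
static int
example_tpt_reserve(struct adapter *sc, int npages)
{
	uint32_t pbl_addr, stag_idx;

	/* PBL space is sized in bytes, one 8-byte entry per page. */
	pbl_addr = t4_pblpool_alloc(sc, npages * 8);
	if (pbl_addr == 0)
		return (ENOMEM);

	/* Reserve a single STAG index for the TPT entry. */
	stag_idx = t4_stag_alloc(sc, 1);
	if (stag_idx == T4_STAG_UNSET) {
		t4_pblpool_free(sc, pbl_addr, npages * 8);
		return (ENOMEM);
	}

	/* ... write the TPT entry and PBL, use the region ... */

	/* Frees must pass the same sizes used at allocation time. */
	t4_stag_free(sc, stag_idx, 1);
	t4_pblpool_free(sc, pbl_addr, npages * 8);
	return (0);
}
#endif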

void
t4_init_tpt(struct adapter *sc)
{
	if (sc->vres.pbl.size != 0)
		sc->pbl_arena = vmem_create("PBL_MEM_POOL", sc->vres.pbl.start,
		    sc->vres.pbl.size, 1, 0, M_FIRSTFIT | M_WAITOK);

	/*
	 * One STAG index per 32-byte TPT entry.  The arena starts at 1
	 * so that index 0 is never handed out.
	 */
	if (sc->vres.stag.size != 0)
		sc->stag_arena = vmem_create("STAG", 1,
		    sc->vres.stag.size >> 5, 1, 0, M_FIRSTFIT | M_WAITOK);
}

void
t4_free_tpt(struct adapter *sc)
{
	if (sc->pbl_arena != NULL)
		vmem_destroy(sc->pbl_arena);
	if (sc->stag_arena != NULL)
		vmem_destroy(sc->stag_arena);
}
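
/*
 * Illustrative only (compiled out): the arenas are created once the
 * PBL and STAG memory ranges in sc->vres are known (attach time) and
 * destroyed at detach, normally after all allocations have been
 * returned.  The guard macro and this function are hypothetical.
 */
#ifdef T4_TPT_EXAMPLE
static void
example_tpt_lifecycle(struct adapter *sc)
{
	t4_init_tpt(sc);	/* requires sc->vres to be populated */
	/* ... t4_stag_alloc(), t4_pblpool_alloc(), and friends ... */
	t4_free_tpt(sc);
}
#endif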

/*
 * TPT support routines.  TPT entries are stored in the STAG adapter
 * memory region and are written to via ULP_TX_MEM_WRITE commands in
 * FW_ULPTX_WR work requests.
 */

void
t4_write_mem_dma_wr(struct adapter *sc, void *wr, int wr_len, int tid,
    uint32_t addr, uint32_t len, vm_paddr_t data, uint64_t cookie)
{
	struct ulp_mem_io *ulpmc;
	struct ulptx_sgl *sgl;

	MPASS(wr_len == T4_WRITE_MEM_DMA_LEN);

	/* Mask the address to the width of the ULP_MEMIO_ADDR field (27 bits). */
	addr &= 0x7FFFFFF;

	memset(wr, 0, wr_len);
	ulpmc = wr;
	INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);
	if (cookie != 0) {
		/* Request a completion carrying the caller's cookie. */
		ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
		ulpmc->wr.wr_lo = cookie;
	}

	/* Ordered memory write; the FID names the first offload rx queue. */
	ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
	    V_T5_ULP_MEMIO_ORDER(1) |
	    V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));

	/* The data length is in 32-byte units. */
	if (chip_id(sc) >= CHELSIO_T7)
		ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(len >> 5));
	else
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(len >> 5));
	ulpmc->len16 = htobe32((tid << 8) |
	    DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
	ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));

	/* A single DSGL entry pointing at the host buffer. */
	sgl = (struct ulptx_sgl *)(ulpmc + 1);
	sgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(1));
	sgl->len0 = htobe32(len);
	sgl->addr0 = htobe64(data);
}

void
t4_write_mem_inline_wr(struct adapter *sc, void *wr, int wr_len, int tid,
    uint32_t addr, uint32_t len, void *data, uint64_t cookie)
{
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;

	MPASS(len > 0 && len <= T4_MAX_INLINE_SIZE);
	MPASS(wr_len == T4_WRITE_MEM_INLINE_LEN(len));

	/* Mask the address to the width of the ULP_MEMIO_ADDR field (27 bits). */
	addr &= 0x7FFFFFF;

	memset(wr, 0, wr_len);
	ulpmc = wr;
	INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);

	if (cookie != 0) {
		/* Request a completion carrying the caller's cookie. */
		ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
		ulpmc->wr.wr_lo = cookie;
	}

	/* The payload is carried inline (immediate) rather than DMA'd. */
	ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
	    F_T5_ULP_MEMIO_IMM);

	/* The data length is in 32-byte units. */
	if (chip_id(sc) >= CHELSIO_T7)
		ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(
			DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
	else
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(
			DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
	ulpmc->len16 = htobe32((tid << 8) |
	    DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
	ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));

	/* Immediate data, padded to a 32B multiple (buffer was zeroed above). */
	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(roundup(len, T4_ULPTX_MIN_IO));

	if (data != NULL)
		memcpy(ulpsc + 1, data, len);
}
194