xref: /freebsd/sys/dev/qat/qat_common/adf_hw_arbiter.c (revision 1719886f6d08408b834d270c59ffcfd821c8f63a)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

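/*
 * Service-arbiter CSR layout constants.  At run time the arbiter base and
 * the worker-thread-to-service-map offset are taken from the device's
 * get_arb_info() callback rather than from these defaults.
 */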
#define ADF_ARB_NUM 4
#define ADF_ARB_REG_SIZE 0x4
#define ADF_ARB_WTR_SIZE 0x20
#define ADF_ARB_OFFSET 0x30000
#define ADF_ARB_REG_SLOT 0x1000
#define ADF_ARB_WTR_OFFSET 0x010
#define ADF_ARB_RO_EN_OFFSET 0x090
#define ADF_ARB_WQCFG_OFFSET 0x100
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C

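/*
 * CSR accessors.  Each takes a byte offset into the bank CSR space; the
 * ring-service-arbiter-enable register is replicated per bank at an
 * ADF_ARB_REG_SLOT stride.
 */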
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value)                     \
	ADF_CSR_WR(csr_addr,                                                   \
		   ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)), \
		   value)

#define WRITE_CSR_ARB_SARCONFIG(csr_addr, csr_offset, index, value)            \
	ADF_CSR_WR(csr_addr, (csr_offset) + (ADF_ARB_REG_SIZE * (index)), value)
#define READ_CSR_ARB_RINGSRVARBEN(csr_addr, index)                             \
	ADF_CSR_RD(csr_addr,                                                   \
		   ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)))

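/* Serializes read-modify-write updates of the per-bank arbiter-enable CSR. */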
static DEFINE_MUTEX(csr_arb_lock);

#define WRITE_CSR_ARB_WRK_2_SER_MAP(                                           \
    csr_addr, csr_offset, wrk_to_ser_map_offset, index, value)                 \
	ADF_CSR_WR(csr_addr,                                                   \
		   ((csr_offset) + (wrk_to_ser_map_offset)) +                  \
		       (ADF_ARB_REG_SIZE * (index)),                           \
		   value)

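/*
 * One-time service-arbiter setup, shared by all device generations; gen2
 * devices additionally program the worker-thread map via
 * adf_init_gen2_arb().
 */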
int
adf_init_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct arb_info info;
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	u32 arb_cfg = 0x1U << 31 | 0x4 << 4 | 0x1;
	u32 arb;

	hw_data->get_arb_info(&info);

	/* Service arb configured for 32-byte responses and
	 * the ring flow-control check enabled.
	 */
	for (arb = 0; arb < ADF_ARB_NUM; arb++)
		WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, arb, arb_cfg);

	return 0;
}

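/*
 * Gen2 arbiter initialization: after the common setup, program the
 * per-engine worker-thread-to-service-arbiter map supplied by the
 * device-specific get_arb_mapping() callback.
 */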
int
adf_init_gen2_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct arb_info info;
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	u32 i;
	const u32 *thd_2_arb_cfg;

	/* Perform the common arbiter initialization. */
	adf_init_arb(accel_dev);

	hw_data->get_arb_info(&info);

	/* Map worker threads to service arbiters */
	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
	if (!thd_2_arb_cfg)
		return EFAULT;

	for (i = 0; i < hw_data->num_engines; i++)
		WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
					    info.arbiter_offset,
					    info.wrk_thd_2_srv_arb_map,
					    i,
					    thd_2_arb_cfg[i]);
	return 0;
}

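/*
 * Recompute the arbitration-enable mask for the bank owning the given
 * ring.  The low half of the bank's ring mask holds tx rings and the
 * high half their rx peers; a pair is arbitration-enabled only when
 * both rings of the pair are enabled.
 */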
void
adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
	int shift;
	u32 arben, arben_tx, arben_rx, arb_mask;
	struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
	struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
	struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;

	arb_mask = csr_info->arb_enable_mask;
	shift = hweight32(arb_mask);	/* number of rings per direction */

	arben_tx = ring->bank->ring_mask & arb_mask;
	arben_rx = (ring->bank->ring_mask >> shift) & arb_mask;
	arben = arben_tx & arben_rx;
	csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
					   ring->bank->bank_number,
					   arben);
}

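/*
 * UIO variant of adf_update_ring_arb(): derive the enable mask from the
 * bundle's rings_enabled bitmap instead of a kernel bank's ring_mask.
 */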
void
adf_update_uio_ring_arb(struct adf_uio_control_bundle *bundle)
{
	int shift;
	u32 arben, arben_tx, arben_rx, arb_mask;
	struct adf_accel_dev *accel_dev = bundle->uio_priv.accel->accel_dev;
	struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
	struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;

	arb_mask = csr_info->arb_enable_mask;
	shift = hweight32(arb_mask);

	arben_tx = bundle->rings_enabled & arb_mask;
	arben_rx = (bundle->rings_enabled >> shift) & arb_mask;
	arben = arben_tx & arben_rx;
	csr_ops->write_csr_ring_srv_arb_en(bundle->csr_addr,
					   bundle->hardware_bundle_number,
					   arben);
}

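/*
 * Set the arbitration-enable bits given in mask (low byte) for the given
 * bank.  The read-modify-write of the enable CSR is serialized with
 * csr_arb_lock so concurrent enable/disable calls do not lose updates.
 */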
void
adf_enable_ring_arb(struct adf_accel_dev *accel_dev,
		    void *csr_addr,
		    unsigned int bank_nr,
		    unsigned int mask)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u32 arbenable;

	if (!csr_addr)
		return;

	mutex_lock(&csr_arb_lock);
	arbenable = csr_ops->read_csr_ring_srv_arb_en(csr_addr, bank_nr);
	arbenable |= mask & 0xFF;
	csr_ops->write_csr_ring_srv_arb_en(csr_addr, bank_nr, arbenable);
	mutex_unlock(&csr_arb_lock);
}

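/*
 * Clear the arbitration-enable bits given in mask (low byte) for the
 * given bank, under the same csr_arb_lock serialization as
 * adf_enable_ring_arb().
 */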
void
adf_disable_ring_arb(struct adf_accel_dev *accel_dev,
		     void *csr_addr,
		     unsigned int bank_nr,
		     unsigned int mask)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	struct resource *csr = csr_addr;
	u32 arbenable;

	if (!csr_addr)
		return;

	mutex_lock(&csr_arb_lock);
	arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr);
	arbenable &= ~mask & 0xFF;
	csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable);
	mutex_unlock(&csr_arb_lock);
}

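/*
 * Undo arbiter initialization: zero the service-arbiter configuration,
 * clear the worker-thread map on devices that have one, and disable
 * arbitration on every bank.
 */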
void
adf_exit_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct arb_info info;
	struct resource *csr;
	unsigned int i;

	if (!accel_dev->transport)
		return;

	csr = accel_dev->transport->banks[0].csr_addr;

	hw_data->get_arb_info(&info);

	/* Reset arbiter configuration */
	for (i = 0; i < ADF_ARB_NUM; i++)
		WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, i, 0);

	/* Unmap worker threads from service arbiters */
	if (hw_data->get_arb_mapping) {
		for (i = 0; i < hw_data->num_engines; i++)
			WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
						    info.arbiter_offset,
						    info.wrk_thd_2_srv_arb_map,
						    i,
						    0);
	}

	/* Disable arbitration on all rings */
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
		csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}

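/*
 * Disable ring arbitration on every bank without touching the rest of
 * the arbiter state.  Tolerates a partially initialized device (NULL
 * accel_dev or missing transport data).
 */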
void
adf_disable_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_csr_ops *csr_ops;
	struct resource *csr;
	unsigned int i;

	if (!accel_dev || !accel_dev->transport)
		return;

	csr = accel_dev->transport->banks[0].csr_addr;
	csr_ops = GET_CSR_OPS(accel_dev);

	/* Disable arbitration on all rings */
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
		csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}