/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_ARB_NUM 4
#define ADF_ARB_REG_SIZE 0x4
#define ADF_ARB_WTR_SIZE 0x20
#define ADF_ARB_OFFSET 0x30000
#define ADF_ARB_REG_SLOT 0x1000
#define ADF_ARB_WTR_OFFSET 0x010
#define ADF_ARB_RO_EN_OFFSET 0x090
#define ADF_ARB_WQCFG_OFFSET 0x100
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C

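/*
 * Editorial note: the helpers below derive a CSR address from a base
 * offset plus a per-index stride.  From the definitions above, the
 * RINGSRVARBEN registers appear to be laid out one per bank at
 * ADF_ARB_REG_SLOT (0x1000) intervals, while the SARCONFIG and
 * worker-to-service map entries are packed at ADF_ARB_REG_SIZE (0x4)
 * intervals.
 */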
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value)                     \
	ADF_CSR_WR(csr_addr,                                                   \
		   ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)), \
		   value)

#define WRITE_CSR_ARB_SARCONFIG(csr_addr, csr_offset, index, value)            \
	ADF_CSR_WR(csr_addr, (csr_offset) + (ADF_ARB_REG_SIZE * (index)), value)
#define READ_CSR_ARB_RINGSRVARBEN(csr_addr, index)                             \
	ADF_CSR_RD(csr_addr,                                                   \
		   ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)))

static DEFINE_MUTEX(csr_arb_lock);

#define WRITE_CSR_ARB_WRK_2_SER_MAP(                                           \
    csr_addr, csr_offset, wrk_to_ser_map_offset, index, value)                 \
	ADF_CSR_WR(csr_addr,                                                   \
		   ((csr_offset) + (wrk_to_ser_map_offset)) +                  \
		       (ADF_ARB_REG_SIZE * (index)),                           \
		   value)

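/*
 * adf_init_arb() - program the ADF_ARB_NUM service-arbiter SARCONFIG
 * registers with a common configuration.  It dereferences
 * accel_dev->transport->banks[0], so the transport banks must already
 * be mapped when this runs (presumably during device bring-up).
 */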
int
adf_init_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct arb_info info;
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	u32 arb_cfg = 0x1U << 31 | 0x4 << 4 | 0x1;
	u32 arb;

	hw_data->get_arb_info(&info);

	/* Service arb configured for 32-byte responses and
	 * ring flow control check enabled.
	 */
	for (arb = 0; arb < ADF_ARB_NUM; arb++)
		WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, arb, arb_cfg);

	return 0;
}

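/*
 * adf_init_gen2_arb() - GEN2 variant: apply the common SARCONFIG setup,
 * then program the worker-thread-to-service-arbiter map supplied by the
 * device's get_arb_mapping() callback, one 32-bit entry per engine.
 */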
int
adf_init_gen2_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct arb_info info;
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	u32 i;
	const u32 *thd_2_arb_cfg;

	/* Invoke the common adf_init_arb() setup first. */
	adf_init_arb(accel_dev);

	hw_data->get_arb_info(&info);

	/* Map worker threads to service arbiters */
	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
	if (!thd_2_arb_cfg)
		return EFAULT;

	for (i = 0; i < hw_data->num_engines; i++)
		WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
					    info.arbiter_offset,
					    info.wrk_thd_2_srv_arb_map,
					    i,
					    thd_2_arb_cfg[i]);
	return 0;
}

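/*
 * adf_update_ring_arb() - recompute a bank's arbitration-enable mask.
 * ring_mask packs TX rings in the low half and RX rings in the upper
 * half (shift is the popcount of arb_enable_mask), and a ring pair is
 * arbitrated only when both halves are enabled, hence the AND of the
 * two partial masks.
 */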
void
adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
	int shift;
	u32 arben, arben_tx, arben_rx, arb_mask;
	struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
	struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
	struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;

	arb_mask = csr_info->arb_enable_mask;
	shift = hweight32(arb_mask);

	arben_tx = ring->bank->ring_mask & arb_mask;
	arben_rx = (ring->bank->ring_mask >> shift) & arb_mask;
	arben = arben_tx & arben_rx;
	csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
					   ring->bank->bank_number,
					   arben);
}

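/*
 * adf_update_uio_ring_arb() - same computation as adf_update_ring_arb(),
 * but driven by a UIO bundle's rings_enabled mask and written to the
 * bundle's hardware bank.
 */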
void
adf_update_uio_ring_arb(struct adf_uio_control_bundle *bundle)
{
	int shift;
	u32 arben, arben_tx, arben_rx, arb_mask;
	struct adf_accel_dev *accel_dev = bundle->uio_priv.accel->accel_dev;
	struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
	struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;

	arb_mask = csr_info->arb_enable_mask;
	shift = hweight32(arb_mask);

	arben_tx = bundle->rings_enabled & arb_mask;
	arben_rx = (bundle->rings_enabled >> shift) & arb_mask;
	arben = arben_tx & arben_rx;
	csr_ops->write_csr_ring_srv_arb_en(bundle->csr_addr,
					   bundle->hardware_bundle_number,
					   arben);
}

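/*
 * adf_enable_ring_arb() - set bits in a bank's RINGSRVARBEN register.
 * The read-modify-write is serialized with csr_arb_lock so that
 * concurrent callers do not clobber each other; only the low byte of
 * the mask is applied.  A hypothetical call (names are illustrative,
 * not from this file) might look like:
 *
 *	adf_enable_ring_arb(accel_dev, bank->csr_addr, bank->bank_number,
 *			    1 << ring_number);
 */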
void
adf_enable_ring_arb(struct adf_accel_dev *accel_dev,
		    void *csr_addr,
		    unsigned int bank_nr,
		    unsigned int mask)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u32 arbenable;

	if (!csr_addr)
		return;

	mutex_lock(&csr_arb_lock);
	arbenable = csr_ops->read_csr_ring_srv_arb_en(csr_addr, bank_nr);
	arbenable |= mask & 0xFF;
	csr_ops->write_csr_ring_srv_arb_en(csr_addr, bank_nr, arbenable);
	mutex_unlock(&csr_arb_lock);
}

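/*
 * adf_disable_ring_arb() - clear bits in a bank's RINGSRVARBEN register
 * under the same csr_arb_lock serialization.  Note that ~mask & 0xFF
 * also clears any bits above the low byte, which is harmless if the
 * register only carries eight arbitration bits.
 */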
void
adf_disable_ring_arb(struct adf_accel_dev *accel_dev,
		     void *csr_addr,
		     unsigned int bank_nr,
		     unsigned int mask)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	struct resource *csr = csr_addr;
	u32 arbenable;

	if (!csr_addr)
		return;

	mutex_lock(&csr_arb_lock);
	arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr);
	arbenable &= ~mask & 0xFF;
	csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable);
	mutex_unlock(&csr_arb_lock);
}

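/*
 * adf_exit_arb() - teardown: zero the SARCONFIG registers, clear the
 * worker-to-service map (when the device provides one), and disable
 * arbitration on every bank.  Safe to call before the transport exists.
 */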
void
adf_exit_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct arb_info info;
	struct resource *csr;
	unsigned int i;

	if (!accel_dev->transport)
		return;

	csr = accel_dev->transport->banks[0].csr_addr;

	hw_data->get_arb_info(&info);

	/* Reset arbiter configuration */
	for (i = 0; i < ADF_ARB_NUM; i++)
		WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, i, 0);

	/* Unmap worker threads from service arbiters */
	if (hw_data->get_arb_mapping) {
		for (i = 0; i < hw_data->num_engines; i++)
			WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
						    info.arbiter_offset,
						    info.wrk_thd_2_srv_arb_map,
						    i,
						    0);
	}

	/* Disable arbitration on all rings */
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
		csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}

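/*
 * adf_disable_arb() - disable arbitration on every bank without
 * touching the SARCONFIG or worker-map state; tolerates a NULL or
 * partially initialized accel_dev.
 */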
void
adf_disable_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_csr_ops *csr_ops;
	struct resource *csr;
	unsigned int i;

	if (!accel_dev || !accel_dev->transport)
		return;

	csr = accel_dev->transport->banks[0].csr_addr;
	csr_ops = GET_CSR_OPS(accel_dev);

	/* Disable arbitration on all rings */
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
		csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}