xref: /freebsd/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c (revision d0b2dbfa0ecf2bbc9709efc5e20baf8e4b44bbbf)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#define TX_RINGS_DISABLE 0
#define TX_RINGS_ENABLE 1
#define PKE_REQ_SIZE 64
#define BASE_ADDR_SHIFT 6
#define PKE_RX_RING_0 0
#define PKE_RX_RING_1 1

#define ADF_RING_EMPTY_RETRY_DELAY 2
#define ADF_RING_EMPTY_MAX_RETRY 15

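/*
 * Snapshot of the rings a dying user process left behind in one ring bank.
 * tx_mask/rx_mask/asym_mask are per-bank ring bitmaps, bank is the bank
 * index, csr_base is the mapped ring CSR BAR and bundle points at the UIO
 * bundle that owns the bank.  A tx ring at index i is paired with the rx
 * ring at index i + tx_rx_gap; for example, on a device with 16 rings per
 * bank and a gap of 8, tx ring 2 pairs with rx ring 10.
 */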
struct bundle_orphan_ring {
	unsigned long tx_mask;
	unsigned long rx_mask;
	unsigned long asym_mask;
	int bank;
	struct resource *csr_base;
	struct adf_uio_control_bundle *bundle;
};

/*
 * Handle the case where orphan->tx_mask does not match orphan->rx_mask:
 * any tx ring without its partner rx ring (and vice versa) is removed
 * from the mask and its ring CSRs are reset immediately.
 */
static void
check_orphan_ring(struct adf_accel_dev *accel_dev,
		  struct bundle_orphan_ring *orphan,
		  struct adf_hw_device_data *hw_data)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	int i;
	int tx_rx_gap = hw_data->tx_rx_gap;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	struct resource *csr_base = orphan->csr_base;
	int bank = orphan->bank;

	for (i = 0; i < num_rings_per_bank; i++) {
		if (test_bit(i, &orphan->tx_mask)) {
			int rx_ring = i + tx_rx_gap;

			if (!test_bit(rx_ring, &orphan->rx_mask)) {
				__clear_bit(i, &orphan->tx_mask);

				/* clean up this tx ring */
				csr_ops->write_csr_ring_config(csr_base,
							       bank,
							       i,
							       0);
				csr_ops->write_csr_ring_base(csr_base,
							     bank,
							     i,
							     0);
			}

		} else if (test_bit(i, &orphan->rx_mask)) {
			int tx_ring = i - tx_rx_gap;

			if (!test_bit(tx_ring, &orphan->tx_mask)) {
				__clear_bit(i, &orphan->rx_mask);

				/* clean up this rx ring */
				csr_ops->write_csr_ring_config(csr_base,
							       bank,
							       i,
							       0);
				csr_ops->write_csr_ring_base(csr_base,
							     bank,
							     i,
							     0);
			}
		}
	}
}

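/*
 * Build a bundle_orphan_ring snapshot for the given bank: record which
 * rings with a non-zero ring base are reserved by the current process,
 * split them into tx/rx (and asym) masks, and drop any ring whose partner
 * ring is missing.  Returns 0 on success with the snapshot stored in
 * *orphan_bundle_out; the caller releases it with put_orphan_bundle().
 */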
static int
get_orphan_bundle(int bank,
		  struct adf_uio_control_accel *accel,
		  struct bundle_orphan_ring **orphan_bundle_out)
{
	int i;
	int ret = 0;
	struct resource *csr_base;
	unsigned long tx_mask;
	unsigned long asym_mask;
	struct adf_accel_dev *accel_dev = accel->accel_dev;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	struct bundle_orphan_ring *orphan_bundle = NULL;
	uint64_t base;
	struct list_head *entry;
	struct adf_uio_instance_rings *instance_rings;
	struct adf_uio_control_bundle *bundle;
	u16 ring_mask = 0;

	orphan_bundle =
	    malloc(sizeof(*orphan_bundle), M_QAT, M_WAITOK | M_ZERO);
	if (!orphan_bundle)
		return ENOMEM;

	csr_base = accel->bar->virt_addr;
	orphan_bundle->csr_base = csr_base;
	orphan_bundle->bank = bank;

	orphan_bundle->tx_mask = 0;
	orphan_bundle->rx_mask = 0;
	tx_mask = accel_dev->hw_device->tx_rings_mask;
	asym_mask = accel_dev->hw_device->asym_rings_mask;

	/* Get ring mask for this process. */
	bundle = &accel->bundle[bank];
	orphan_bundle->bundle = bundle;
	mutex_lock(&bundle->list_lock);
	list_for_each(entry, &bundle->list)
	{
		instance_rings =
		    list_entry(entry, struct adf_uio_instance_rings, list);
		if (instance_rings->user_pid == curproc->p_pid) {
			ring_mask = instance_rings->ring_mask;
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

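	/* Classify each live ring owned by this process as tx/rx and asym. */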
	for (i = 0; i < num_rings_per_bank; i++) {
		base = csr_ops->read_csr_ring_base(csr_base, bank, i);

		if (!base)
			continue;
		if (!(ring_mask & 1 << i))
			continue; /* Not reserved for this process. */

		if (test_bit(i, &tx_mask))
			__set_bit(i, &orphan_bundle->tx_mask);
		else
			__set_bit(i, &orphan_bundle->rx_mask);

		if (test_bit(i, &asym_mask))
			__set_bit(i, &orphan_bundle->asym_mask);
	}

	if (orphan_bundle->tx_mask || orphan_bundle->rx_mask)
		check_orphan_ring(accel_dev, orphan_bundle, hw_data);

	*orphan_bundle_out = orphan_bundle;
	return ret;
}

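/* Release a snapshot allocated by get_orphan_bundle(). */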
static void
put_orphan_bundle(struct bundle_orphan_ring *bundle)
{
	if (!bundle)
		return;

	free(bundle, M_QAT);
}

/* Clean up (disable arbitration for and reset) all orphan rings. */
static void
cleanup_all_ring(struct adf_uio_control_accel *accel,
		 struct bundle_orphan_ring *orphan)
{
	int i;
	struct resource *csr_base = orphan->csr_base;
	unsigned long mask = orphan->rx_mask | orphan->tx_mask;
	struct adf_accel_dev *accel_dev = accel->accel_dev;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	int bank = orphan->bank;

	mutex_lock(&orphan->bundle->lock);
	orphan->bundle->rings_enabled &= ~mask;
	adf_update_uio_ring_arb(orphan->bundle);
	mutex_unlock(&orphan->bundle->lock);

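	/* Reset the ring config and base CSRs for every orphan ring. */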
	for (i = 0; i < num_rings_per_bank; i++) {
		if (!test_bit(i, &mask))
			continue;

		csr_ops->write_csr_ring_config(csr_base, bank, i, 0);
		csr_ops->write_csr_ring_base(csr_base, bank, i, 0);
	}
}

/*
 * Return true if, for every orphan tx ring, the number of requests
 * consumed from the tx ring matches the number of responses delivered
 * on its corresponding rx ring, i.e. all responses have been received;
 * return false otherwise.
 */
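/*
 * The check is retried with exponential backoff, starting at
 * ADF_RING_EMPTY_RETRY_DELAY microseconds and doubling on each of the
 * ADF_RING_EMPTY_MAX_RETRY attempts, i.e. a total wait of roughly 65 ms
 * before giving up.
 */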
static bool
is_all_resp_recvd(struct adf_hw_csr_ops *csr_ops,
		  struct bundle_orphan_ring *bundle,
		  const u8 num_rings_per_bank)
{
	u32 rx_tail = 0, tx_head = 0, rx_ring_msg_offset = 0,
	    tx_ring_msg_offset = 0, tx_rx_offset = num_rings_per_bank / 2,
	    idx = 0, retry = 0, delay = ADF_RING_EMPTY_RETRY_DELAY;

	do {
		for_each_set_bit(idx, &bundle->tx_mask, tx_rx_offset)
		{
			rx_tail =
			    csr_ops->read_csr_ring_tail(bundle->csr_base,
							0,
							(idx + tx_rx_offset));
			tx_head = csr_ops->read_csr_ring_head(bundle->csr_base,
							      0,
							      idx);

			/*
			 * Normalize the tx ring offset to the rx ring
			 * message size, i.e. the 32-byte response size.
			 * Asym requests are 64 bytes each, so shift right
			 * by 1 to normalize to 32; sym and compression
			 * requests are 128 bytes each, so shift right by 2.
			 */
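			/*
			 * For example (hypothetical values): an asym tx
			 * head offset of 0x80 means two 64-byte requests
			 * were consumed, and 0x80 >> 1 == 0x40 matches an
			 * rx tail offset of two 32-byte responses.
			 */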
			if (bundle->asym_mask & (1 << idx))
				tx_ring_msg_offset = (tx_head >> 1);
			else
				tx_ring_msg_offset = (tx_head >> 2);

			rx_ring_msg_offset = rx_tail;

			if (tx_ring_msg_offset != rx_ring_msg_offset)
				break;
		}
		if (idx == tx_rx_offset)
			/* All Tx and Rx ring message counts match */
			return true;

		DELAY(delay);
		delay *= 2;
	} while (++retry < ADF_RING_EMPTY_MAX_RETRY);

	return false;
}

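/*
 * Return non-zero if any ring in the bank still has a ring base
 * programmed, i.e. the bank was left with configured rings that need
 * cleaning up.
 */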
static int
bundle_need_cleanup(int bank, struct adf_uio_control_accel *accel)
{
	struct resource *csr_base = accel->bar->virt_addr;
	struct adf_accel_dev *accel_dev = accel->accel_dev;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	int i;

	if (!csr_base)
		return 0;

	for (i = 0; i < num_rings_per_bank; i++) {
		if (csr_ops->read_csr_ring_base(csr_base, bank, i))
			return 1;
	}

	return 0;
}

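/*
 * Quiesce and reset the orphan rings of one bank: disable the bank
 * interrupts, stop tx arbitration, wait until the firmware has delivered
 * all outstanding responses and then reset the rings.  If the responses
 * do not arrive in time, the rings are left untouched and an error is
 * logged.
 */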
static void
cleanup_orphan_ring(struct bundle_orphan_ring *orphan,
		    struct adf_uio_control_accel *accel)
{
	struct adf_accel_dev *accel_dev = accel->accel_dev;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 number_rings_per_bank = hw_data->num_rings_per_bank;

	/* Disable the interrupts for this bank. */
	csr_ops->write_csr_int_col_en(orphan->csr_base, orphan->bank, 0);

	/*
	 * Wait for the firmware to finish the requests still in flight:
	 * 1. disable arbitration for all tx rings,
	 * 2. check that all responses have been received,
	 * 3. reset all rings.
	 */
	adf_disable_ring_arb(accel_dev, orphan->csr_base, 0, orphan->tx_mask);

	if (!is_all_resp_recvd(csr_ops, orphan, number_rings_per_bank)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to clean up orphan rings\n");
		return;
	}

	/*
	 * By the time execution reaches this point it is assumed that
	 * there are no in-flight requests left in the rings and that
	 * the firmware is no longer processing any ring.
	 */

	cleanup_all_ring(accel, orphan);
}

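/*
 * Clean up after a user process that exited while it still owned rings
 * in the given bank: reset (or drain and reset) its orphan rings and
 * release its ring reservation from the bundle.
 */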
void
adf_uio_do_cleanup_orphan(int bank, struct adf_uio_control_accel *accel)
{
	int ret, pid_found;
	struct adf_uio_instance_rings *instance_rings, *tmp;
	struct adf_uio_control_bundle *bundle;
	/* orphan is a local pointer, allocated and freed in this function. */
	struct bundle_orphan_ring *orphan = NULL;
	struct adf_accel_dev *accel_dev = accel->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	if (!bundle_need_cleanup(bank, accel))
		goto release;

	ret = get_orphan_bundle(bank, accel, &orphan);
	if (ret != 0)
		return;

	/*
	 * If the driver supports ring pair reset, perform it regardless of
	 * whether the process exited normally or abnormally.  The reset
	 * restores all ring pair registers to their default values, so the
	 * driver only needs to clear the ring masks.
	 */
	if (hw_data->ring_pair_reset) {
		hw_data->ring_pair_reset(
		    accel_dev, orphan->bundle->hardware_bundle_number);
		mutex_lock(&orphan->bundle->lock);
		/*
		 * If the process exited normally, rx_mask, tx_mask and
		 * rings_enabled are all 0 and the expression below has no
		 * effect.  If the process exited abnormally, the expression
		 * below clears its rings from rings_enabled.
		 */
		orphan->bundle->rings_enabled &=
		    ~(orphan->rx_mask | orphan->tx_mask);
		mutex_unlock(&orphan->bundle->lock);
		goto out;
	}

	if (!orphan->tx_mask && !orphan->rx_mask)
		goto out;

	device_printf(GET_DEV(accel_dev),
		      "Process %d %s exited with orphan rings %lx:%lx\n",
		      curproc->p_pid,
		      curproc->p_comm,
		      orphan->tx_mask,
		      orphan->rx_mask);

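	/*
	 * Skip the hardware cleanup while the device is restarting; the
	 * reset is expected to reinitialize the rings.
	 */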
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
		cleanup_orphan_ring(orphan, accel);
	}
out:
	put_orphan_bundle(orphan);

release:

	bundle = &accel->bundle[bank];
	/*
	 * If the user process died without releasing the rings
	 * then force a release here.
	 */
	mutex_lock(&bundle->list_lock);
	pid_found = 0;
	list_for_each_entry_safe(instance_rings, tmp, &bundle->list, list)
	{
		if (instance_rings->user_pid == curproc->p_pid) {
			pid_found = 1;
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	if (pid_found) {
		mutex_lock(&bundle->lock);
		bundle->rings_used &= ~instance_rings->ring_mask;
		mutex_unlock(&bundle->lock);
	}
}