xref: /freebsd/sys/dev/qat/qat_common/adf_transport.c (revision 29fc4075e69fd27de0cded313ac6000165d99f8b)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <linux/delay.h>

#define QAT_RING_ALIGNMENT 64

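/*
 * Fast modulo for power-of-two divisors: computes data % (1 << shift)
 * without a divide, by subtracting the largest multiple of (1 << shift)
 * not exceeding data. E.g. adf_modulo(10, 3) = 10 - (10 >> 3 << 3) = 2.
 */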
static inline u32
adf_modulo(u32 data, u32 shift)
{
	u32 div = data >> shift;
	u32 mult = div << shift;

	return data - mult;
}

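/*
 * A ring's DMA address must be aligned to the ring size. Since ring
 * sizes are powers of two, an aligned address has all of the low
 * (size - 1) bits clear; any set bit there means a misaligned ring.
 */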
static inline int
adf_check_ring_alignment(u64 addr, u64 size)
{
	if (((size - 1) & addr) != 0)
		return EFAULT;
	return 0;
}

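/*
 * Find the ring-size encoding whose byte capacity exactly matches
 * msg_size * msg_num, falling back to ADF_DEFAULT_RING_SIZE when no
 * exact match exists.
 */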
static int
adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
	int i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return ADF_DEFAULT_RING_SIZE;
}

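/*
 * Atomically claim a ring slot in the bank's ring_mask under the bank
 * lock. Returns EFAULT if the ring is already reserved.
 */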
static int
adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		mtx_unlock(&bank->lock);
		return EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	mtx_unlock(&bank->lock);
	return 0;
}

static void
adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	mtx_unlock(&bank->lock);
}

static void
adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	bank->irq_mask |= (1 << ring);
	mtx_unlock(&bank->lock);
	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
	WRITE_CSR_INT_COL_CTL(bank->csr_addr,
			      bank->bank_number,
			      bank->irq_coalesc_timer);
}

static void
adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	mtx_unlock(&bank->lock);
	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}

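/*
 * Copy one request message onto the ring and advance the tail CSR.
 * The inflight counter bounds the number of outstanding requests; on
 * overflow the counter is rolled back and EAGAIN is returned so the
 * caller can retry. A hypothetical caller (names below are
 * illustrative, not from this driver) might loop:
 *
 *	while (adf_send_message(ring, (u32 *)&req) == EAGAIN)
 *		qat_backoff();	// hypothetical backoff helper
 */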
int
adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
	u32 msg_size = 0;

	if (atomic_add_return(1, ring->inflights) > ring->max_inflights) {
		atomic_dec(ring->inflights);
		return EAGAIN;
	}

	msg_size = ADF_MSG_SIZE_TO_BYTES(ring->msg_size);
	mtx_lock(&ring->lock);
	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail),
	       msg,
	       msg_size);

	ring->tail = adf_modulo(ring->tail + msg_size,
				ADF_RING_SIZE_MODULO(ring->ring_size));

	WRITE_CSR_RING_TAIL(ring->bank->csr_addr,
			    ring->bank->bank_number,
			    ring->ring_number,
			    ring->tail);
	ring->csr_tail_offset = ring->tail;
	mtx_unlock(&ring->lock);
	return 0;
}

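/*
 * Drain up to quota response messages from the ring (a quota of 0
 * means use ADF_NO_RESPONSE_QUOTA). Each consumed slot is handed to
 * the ring callback, then overwritten with ADF_RING_EMPTY_SIG so it
 * reads as empty on the next pass. Returns the number of messages
 * processed.
 */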
int
adf_handle_response(struct adf_etr_ring_data *ring, u32 quota)
{
	u32 msg_counter = 0;
	u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

	if (!quota)
		quota = ADF_NO_RESPONSE_QUOTA;

	while ((*msg != ADF_RING_EMPTY_SIG) && (msg_counter < quota)) {
		ring->callback(msg);
		atomic_dec(ring->inflights);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head = adf_modulo(ring->head + ADF_MSG_SIZE_TO_BYTES(
							 ring->msg_size),
					ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
	}
	if (msg_counter > 0)
		WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
				    ring->bank->bank_number,
				    ring->ring_number,
				    ring->head);
	return msg_counter;
}

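/*
 * Poll every non-empty ring in one bank. Returns 0 if at least one
 * response was processed, EAGAIN if all rings were empty, or EINVAL
 * for an unknown accel_id.
 */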
int
adf_poll_bank(u32 accel_id, u32 bank_num, u32 quota)
{
	int num_resp;
	struct adf_accel_dev *accel_dev;
	struct adf_etr_data *trans_data;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	u32 rings_not_empty;
	u32 ring_num;
	u32 resp_total = 0;
	u32 num_rings_per_bank;

	/* Find the accel device associated with the accel_id passed in. */
	accel_dev = adf_devmgr_get_dev_by_id(accel_id);
	if (!accel_dev) {
		pr_err("There is no device with id: %d\n", accel_id);
		return EINVAL;
	}

	trans_data = accel_dev->transport;
	bank = &trans_data->banks[bank_num];
	mtx_lock(&bank->lock);

	/* Read the ring status CSR to determine which rings are empty. */
	rings_not_empty = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
	/* Complement to find which rings have data to be processed. */
	rings_not_empty = (~rings_not_empty) & bank->ring_mask;

	/* Return EAGAIN if all of the bank's polling rings are empty. */
	if (!(rings_not_empty & bank->ring_mask)) {
		mtx_unlock(&bank->lock);
		return EAGAIN;
	}

	/*
	 * Loop over all rings within this bank. Ring numbering is global
	 * across the bank, so use ring->ring_number to index the
	 * not-empty mask.
	 */
	num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;
	for (ring_num = 0; ring_num < num_rings_per_bank; ring_num++) {
		ring = &bank->rings[ring_num];

		/* If there is no data on this ring, move to the next one. */
		if (!(rings_not_empty & (1 << ring->ring_number)))
			continue;

		/* Poll the ring. */
		num_resp = adf_handle_response(ring, quota);
		resp_total += num_resp;
	}

	mtx_unlock(&bank->lock);
	/* Return 0 (success) if any response messages were processed. */
	if (resp_total)
		return 0;
	return EAGAIN;
}

int
adf_poll_all_banks(u32 accel_id, u32 quota)
{
	int status = EAGAIN;
	struct adf_accel_dev *accel_dev;
	struct adf_etr_data *trans_data;
	struct adf_etr_bank_data *bank;
	u32 bank_num;
	u32 stat_total = 0;

	/* Find the accel device associated with the accel_id passed in. */
	accel_dev = adf_devmgr_get_dev_by_id(accel_id);
	if (!accel_dev) {
		pr_err("There is no device with id: %d\n", accel_id);
		return EINVAL;
	}

	/* Loop over the banks and call adf_poll_bank on each. */
	trans_data = accel_dev->transport;
	for (bank_num = 0; bank_num < GET_MAX_BANKS(accel_dev); bank_num++) {
		bank = &trans_data->banks[bank_num];
		/* If there are no polling rings on this bank,
		 * continue to the next bank number.
		 */
		if (bank->ring_mask == 0)
			continue;
		status = adf_poll_bank(accel_id, bank_num, quota);
		/* A successful status is either 0 or EAGAIN. */
		if (status == 0)
			stat_total++;
		else if (status != EAGAIN)
			return status;
	}

	/* Return 0 (success) if adf_poll_bank succeeded at any stage;
	 * any hard failure was already returned above.
	 */
	if (stat_total)
		return 0;

	return EAGAIN;
}

static void
adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);

	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr,
			      ring->bank->bank_number,
			      ring->ring_number,
			      ring_config);
}

static void
adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size,
						 ADF_RING_NEAR_WATERMARK_512,
						 ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr,
			      ring->bank->bank_number,
			      ring->ring_number,
			      ring_config);
}

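/*
 * Allocate the DMA backing store for a ring and program its base CSR.
 * The buffer is filled with 0x7F bytes so that every slot initially
 * reads as the empty-message signature checked in adf_handle_response.
 */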
static int
adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u64 ring_base;
	u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	int ret;

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	ret = bus_dma_mem_create(&ring->dma_mem,
				 accel_dev->dma_tag,
				 ring_size_bytes,
				 BUS_SPACE_MAXADDR,
				 ring_size_bytes,
				 M_WAITOK | M_ZERO);
	if (ret)
		return ret;
	ring->base_addr = ring->dma_mem.dma_vaddr;
	ring->dma_addr = ring->dma_mem.dma_baddr;

	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The DMA address has to be aligned to the size of the ring. */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		device_printf(GET_DEV(accel_dev), "Ring address not aligned\n");
		bus_dma_mem_free(&ring->dma_mem);
		ring->base_addr = NULL;
		return EFAULT;
	}

	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);
	else
		adf_configure_rx_ring(ring);

	ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
	WRITE_CSR_RING_BASE(ring->bank->csr_addr,
			    ring->bank->bank_number,
			    ring->ring_number,
			    ring_base);
	mtx_init(&ring->lock, "adf bank", NULL, MTX_DEF);
	return 0;
}

static void
adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		explicit_bzero(ring->base_addr, ring_size_bytes);
		bus_dma_mem_free(&ring->dma_mem);
	}
	mtx_destroy(&ring->lock);
}

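/*
 * Create and configure the ring identified by ring_name in the given
 * device config section. A hypothetical caller (the section and ring
 * key names below are illustrative only, not from this driver) could
 * create a polled Tx ring like:
 *
 *	struct adf_etr_ring_data *tx_ring;
 *	int err = adf_create_ring(accel_dev, "SSL", bank_num, 64, 128,
 *				  "RingSym", NULL, 1, &tx_ring);
 *	if (err)
 *		return err;
 */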
int
adf_create_ring(struct adf_accel_dev *accel_dev,
		const char *section,
		u32 bank_num,
		u32 num_msgs,
		u32 msg_size,
		const char *ring_name,
		adf_callback_fn callback,
		int poll_mode,
		struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 ring_num;
	int ret;
	u8 num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Invalid bank number\n");
		return EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		device_printf(GET_DEV(accel_dev), "Invalid msg size\n");
		return EFAULT;
	}
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid ring size for given msg size\n");
		return EFAULT;
	}
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		device_printf(GET_DEV(accel_dev),
			      "Section %s, no such entry: %s\n",
			      section,
			      ring_name);
		return EFAULT;
	}
	if (compat_strtouint(val, 10, &ring_num)) {
		device_printf(GET_DEV(accel_dev), "Can't get ring number\n");
		return EFAULT;
	}
	if (ring_num >= num_rings_per_bank) {
		device_printf(GET_DEV(accel_dev), "Invalid ring number\n");
		return EFAULT;
	}

	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		device_printf(GET_DEV(accel_dev),
			      "Ring %d, %s already exists.\n",
			      ring_num,
			      ring_name);
		return EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->max_inflights =
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
	ring->head = 0;
	ring->tail = 0;
	ring->csr_tail_offset = 0;
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	adf_update_ring_arb(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		device_printf(GET_DEV(accel_dev),
			      "Couldn't add ring debugfs entry\n");
		ret = EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && !poll_mode)
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	adf_update_ring_arb(ring);
	return ret;
}

void
adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear the ring's config and base CSRs */
	WRITE_CSR_RING_CONFIG(bank->csr_addr,
			      bank->bank_number,
			      ring->ring_number,
			      0);
	WRITE_CSR_RING_BASE(bank->csr_addr,
			    bank->bank_number,
			    ring->ring_number,
			    0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	adf_update_ring_arb(ring);
	adf_cleanup_ring(ring);
}

static void
adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 empty_rings, i;

	empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for (i = 0; i < num_rings_per_bank; ++i) {
		if (empty_rings & (1 << i))
			adf_handle_response(&bank->rings[i], 0);
	}
}

void
adf_response_handler(uintptr_t bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;

	/* Handle all the responses and re-enable IRQs */
	adf_ring_response_handler(bank);
	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr,
				   bank->bank_number,
				   bank->irq_mask);
}

static inline int
adf_get_cfg_int(struct adf_accel_dev *accel_dev,
		const char *section,
		const char *format,
		u32 key,
		u32 *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return EFAULT;

	if (compat_strtouint(val_buf, 10, value))
		return EFAULT;
	return 0;
}

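/*
 * Read the per-bank interrupt coalescing timer from the config (or
 * keep the default), scale it by the device clock when available, and
 * clamp the result to the supported [MIN, MAX] range.
 */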
static void
adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
		      const char *section,
		      u32 bank_num_in_accel)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 coalesc_timer = ADF_COALESCING_DEF_TIME;

	adf_get_cfg_int(accel_dev,
			section,
			ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			bank_num_in_accel,
			&coalesc_timer);

	if (hw_data->get_clock_speed)
		bank->irq_coalesc_timer =
		    (coalesc_timer *
		     (hw_data->get_clock_speed(hw_data) / USEC_PER_SEC)) /
		    NSEC_PER_USEC;
	else
		bank->irq_coalesc_timer = coalesc_timer;

	if (bank->irq_coalesc_timer > ADF_COALESCING_MAX_TIME)
		bank->irq_coalesc_timer = ADF_COALESCING_MAX_TIME;
	else if (bank->irq_coalesc_timer < ADF_COALESCING_MIN_TIME)
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
}

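/*
 * Initialize one bank: allocate its ring array, set up interrupt
 * coalescing, and wire up inflight accounting. Only Tx rings allocate
 * an inflight counter; the Rx ring sitting tx_rx_gap slots above
 * shares its Tx partner's counter, since one request/response pair
 * accounts for a single inflight message.
 */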
static int
adf_init_bank(struct adf_accel_dev *accel_dev,
	      struct adf_etr_bank_data *bank,
	      u32 bank_num,
	      struct resource *csr_addr)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_etr_ring_data *ring;
	struct adf_etr_ring_data *tx_ring;
	u32 i, coalesc_enabled = 0;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 size = 0;

	explicit_bzero(bank, sizeof(*bank));
	bank->bank_number = bank_num;
	bank->csr_addr = csr_addr;
	bank->accel_dev = accel_dev;
	mtx_init(&bank->lock, "adf bank", NULL, MTX_DEF);

	/* Allocate the rings in the bank */
	size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
	bank->rings = kzalloc_node(size,
				   M_WAITOK | M_ZERO,
				   dev_to_node(GET_DEV(accel_dev)));

	/* Always enable IRQ coalescing; this allows the optimised flag
	 * and coalescing register to be used. If coalescing is disabled
	 * in the config file, just use the minimum time value. */
	if ((adf_get_cfg_int(accel_dev,
			     "Accelerator0",
			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
			     bank_num,
			     &coalesc_enabled) == 0) &&
	    coalesc_enabled)
		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
	else
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

	for (i = 0; i < num_rings_per_bank; i++) {
		WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
		WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			ring->inflights =
			    kzalloc_node(sizeof(atomic_t),
					 M_WAITOK | M_ZERO,
					 dev_to_node(GET_DEV(accel_dev)));
		} else {
			if (i < hw_data->tx_rx_gap) {
				device_printf(GET_DEV(accel_dev),
					      "Invalid tx rings mask config\n");
				goto err;
			}
			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
			ring->inflights = tx_ring->inflights;
		}
	}

	if (adf_bank_debugfs_add(bank)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to add bank debugfs entry\n");
		goto err;
	}

	WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
	WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
	return 0;
err:
	for (i = 0; i < num_rings_per_bank; i++) {
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			kfree(ring->inflights);
			ring->inflights = NULL;
		}
	}
	kfree(bank->rings);
	return ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *csr_addr;
	u32 size;
	u32 num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data),
				M_WAITOK | M_ZERO,
				dev_to_node(GET_DEV(accel_dev)));

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size,
				       M_WAITOK | M_ZERO,
				       dev_to_node(GET_DEV(accel_dev)));

	accel_dev->transport = etr_data;
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	etr_data->debug =
	    SYSCTL_ADD_NODE(&accel_dev->sysctl_ctx,
			    SYSCTL_CHILDREN(
				device_get_sysctl_tree(GET_DEV(accel_dev))),
			    OID_AUTO,
			    "transport",
			    CTLFLAG_RD,
			    NULL,
			    "Transport parameters");
	if (!etr_data->debug) {
		device_printf(GET_DEV(accel_dev),
			      "Unable to create transport debugfs entry\n");
		ret = ENOENT;
		goto err_bank_all;
	}

	for (i = 0; i < num_banks; i++) {
		ret =
		    adf_init_bank(accel_dev, &etr_data->banks[i], i, csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	kfree(etr_data->banks);
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}

static void
cleanup_bank(struct adf_etr_bank_data *bank)
{
	u32 i;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;

	for (i = 0; i < num_rings_per_bank; i++) {
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		if (hw_data->tx_rings_mask & (1 << i)) {
			kfree(ring->inflights);
			ring->inflights = NULL;
		}
	}
	kfree(bank->rings);
	adf_bank_debugfs_rm(bank);
	mtx_destroy(&bank->lock);
	explicit_bzero(bank, sizeof(*bank));
}

static void
adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	u32 i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}