xref: /freebsd/sys/dev/qat/qat_common/adf_transport.c (revision e6bfd18d21b225af6a0ed67ceeaf1293b7b9eba5)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <linux/delay.h>

#define QAT_RING_ALIGNMENT 64

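/*
 * Fast modulo for power-of-two divisors: returns data % (1 << shift)
 * without a hardware divide.
 */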
static inline u32
adf_modulo(u32 data, u32 shift)
{
	u32 div = data >> shift;
	u32 mult = div << shift;

	return data - mult;
}

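/*
 * Ring buffers must be naturally aligned to their (power-of-two) size:
 * any address bit set inside the size mask means the base is misaligned.
 */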
static inline int
adf_check_ring_alignment(u64 addr, u64 size)
{
	if (((size - 1) & addr) != 0)
		return EFAULT;
	return 0;
}

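/*
 * Translate a (msg_size * msg_num) byte count into the hardware ring
 * size encoding; fall back to the default size if nothing matches.
 */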
static int
adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
	int i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return ADF_DEFAULT_RING_SIZE;
}

static int
adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		mtx_unlock(&bank->lock);
		return EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	mtx_unlock(&bank->lock);
	return 0;
}

static void
adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	mtx_unlock(&bank->lock);
}

static void
adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	mtx_lock(&bank->lock);
	bank->irq_mask |= (1 << ring);
	mtx_unlock(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr,
				      bank->bank_number,
				      bank->irq_mask);
	csr_ops->write_csr_int_col_ctl(bank->csr_addr,
				       bank->bank_number,
				       bank->irq_coalesc_timer);
}

static void
adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	mtx_lock(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	mtx_unlock(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr,
				      bank->bank_number,
				      bank->irq_mask);
}

int
adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 msg_size = 0;

	if (atomic_add_return(1, ring->inflights) > ring->max_inflights) {
		atomic_dec(ring->inflights);
		return EAGAIN;
	}

	msg_size = ADF_MSG_SIZE_TO_BYTES(ring->msg_size);
	mtx_lock(&ring->lock);
	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail),
	       msg,
	       msg_size);

	ring->tail = adf_modulo(ring->tail + msg_size,
				ADF_RING_SIZE_MODULO(ring->ring_size));

	csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number,
				     ring->tail);
	ring->csr_tail_offset = ring->tail;
	mtx_unlock(&ring->lock);
	return 0;
}

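/*
 * Usage sketch (illustrative, not part of this file): a service that owns
 * a tx ring handle from adf_create_ring() submits one request and backs
 * off while the ring is full.  The request buffer layout and the 1-tick
 * pause are assumptions for the example.
 *
 *	u32 req[32] = { 0 };	// e.g. a 128-byte firmware request
 *
 *	// ... fill in the request descriptor ...
 *	while (adf_send_message(tx_ring, req) == EAGAIN)
 *		pause("adfsnd", 1);	// ring full: wait for responses
 */
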
int
adf_handle_response(struct adf_etr_ring_data *ring, u32 quota)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 msg_counter = 0;
	u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

	if (!quota)
		quota = ADF_NO_RESPONSE_QUOTA;

	while ((*msg != ADF_RING_EMPTY_SIG) && (msg_counter < quota)) {
		ring->callback((u32 *)msg);
		atomic_dec(ring->inflights);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head = adf_modulo(ring->head + ADF_MSG_SIZE_TO_BYTES(
							 ring->msg_size),
					ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
	}
	if (msg_counter > 0)
		csr_ops->write_csr_ring_head(ring->bank->csr_addr,
					     ring->bank->bank_number,
					     ring->ring_number,
					     ring->head);
	return msg_counter;
}

int
adf_poll_bank(u32 accel_id, u32 bank_num, u32 quota)
{
	int num_resp;
	struct adf_accel_dev *accel_dev;
	struct adf_etr_data *trans_data;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	struct adf_hw_csr_ops *csr_ops;
	u32 rings_not_empty;
	u32 ring_num;
	u32 resp_total = 0;
	u32 num_rings_per_bank;

	/* Find the accel device associated with the accel_id passed in. */
	accel_dev = adf_devmgr_get_dev_by_id(accel_id);
	if (!accel_dev) {
		pr_err("There is no device with id: %d\n", accel_id);
		return EINVAL;
	}

	csr_ops = GET_CSR_OPS(accel_dev);
	trans_data = accel_dev->transport;
	bank = &trans_data->banks[bank_num];
	mtx_lock(&bank->lock);

	/* Read the ring status CSR to determine which rings are empty. */
	rings_not_empty =
	    csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number);
	/* Complement to find which rings have data to be processed. */
	rings_not_empty = (~rings_not_empty) & bank->ring_mask;

	/* Return EAGAIN if the bank's polling rings are all empty. */
	if (!(rings_not_empty & bank->ring_mask)) {
		mtx_unlock(&bank->lock);
		return EAGAIN;
	}

	/*
	 * Loop over all rings within this bank.  Test each ring against
	 * the not-empty mask using its ring_number rather than the loop
	 * index, since ring numbering is what the e_stat CSR reflects.
	 */
	num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;
	for (ring_num = 0; ring_num < num_rings_per_bank; ring_num++) {
		ring = &bank->rings[ring_num];

		/* If there is no data on this ring, move on to the next. */
		if (!(rings_not_empty & (1 << ring->ring_number)))
			continue;

		/* Poll the ring. */
		num_resp = adf_handle_response(ring, quota);
		resp_total += num_resp;
	}

	mtx_unlock(&bank->lock);
	/* Return 0 if any response messages were handled. */
	if (resp_total)
		return 0;
	return EAGAIN;
}

int
adf_poll_all_banks(u32 accel_id, u32 quota)
{
	int status = EAGAIN;
	struct adf_accel_dev *accel_dev;
	struct adf_etr_data *trans_data;
	struct adf_etr_bank_data *bank;
	u32 bank_num;
	u32 stat_total = 0;

	/* Find the accel device associated with the accel_id passed in. */
	accel_dev = adf_devmgr_get_dev_by_id(accel_id);
	if (!accel_dev) {
		pr_err("There is no device with id: %d\n", accel_id);
		return EINVAL;
	}

	/* Loop over banks and call adf_poll_bank */
	trans_data = accel_dev->transport;
	for (bank_num = 0; bank_num < GET_MAX_BANKS(accel_dev); bank_num++) {
		bank = &trans_data->banks[bank_num];
		/* If there are no polling rings on this bank,
		 * continue to the next bank.
		 */
		if (bank->ring_mask == 0)
			continue;
		status = adf_poll_bank(accel_id, bank_num, quota);
		/* A successful status is either 0 or EAGAIN. */
		if (status == 0)
			stat_total++;
		else if (status != EAGAIN)
			return status;
	}

	/* Return 0 if adf_poll_bank returned 0 for any bank;
	 * any hard failure was already returned above.
	 */
	if (stat_total)
		return 0;

	return EAGAIN;
}

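/*
 * Usage sketch (illustrative): a polled-mode consumer drains responses
 * from every bank of device 0, treating EAGAIN as "nothing ready yet".
 * The device id and the per-ring quota of 8 are arbitrary values for
 * the example.
 *
 *	int ret = adf_poll_all_banks(0, 8);
 *	if (ret != 0 && ret != EAGAIN)
 *		return ret;	// EINVAL: no such device
 */
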
static void
adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number,
				       ring_config);
}

static void
adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size,
						 ADF_RING_NEAR_WATERMARK_512,
						 ADF_RING_NEAR_WATERMARK_0);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number,
				       ring_config);
}

static int
adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u64 ring_base;
	u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	int ret;

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	ret = bus_dma_mem_create(&ring->dma_mem,
				 accel_dev->dma_tag,
				 ring_size_bytes,
				 BUS_SPACE_MAXADDR,
				 ring_size_bytes,
				 M_WAITOK | M_ZERO);
	if (ret)
		return ret;
	ring->base_addr = ring->dma_mem.dma_vaddr;
	ring->dma_addr = ring->dma_mem.dma_baddr;

	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The DMA base address has to be aligned to the size of the buffer. */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		device_printf(GET_DEV(accel_dev), "Ring address not aligned\n");
		bus_dma_mem_free(&ring->dma_mem);
		ring->base_addr = NULL;
		return EFAULT;
	}

	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);
	else
		adf_configure_rx_ring(ring);

	ring_base =
	    csr_ops->build_csr_ring_base_addr(ring->dma_addr, ring->ring_size);
	csr_ops->write_csr_ring_base(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number,
				     ring_base);
	mtx_init(&ring->lock, "adf bank", NULL, MTX_DEF);
	return 0;
}

static void
adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		explicit_bzero(ring->base_addr, ring_size_bytes);
		bus_dma_mem_free(&ring->dma_mem);
	}
	mtx_destroy(&ring->lock);
}

int
adf_create_ring(struct adf_accel_dev *accel_dev,
		const char *section,
		u32 bank_num,
		u32 num_msgs,
		u32 msg_size,
		const char *ring_name,
		adf_callback_fn callback,
		int poll_mode,
		struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 ring_num;
	int ret;
	u8 num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Invalid bank number\n");
		return EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		device_printf(GET_DEV(accel_dev), "Invalid msg size\n");
		return EFAULT;
	}
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid ring size for given msg size\n");
		return EFAULT;
	}
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		device_printf(GET_DEV(accel_dev),
			      "Section %s, no such entry : %s\n",
			      section,
			      ring_name);
		return EFAULT;
	}
	if (compat_strtouint(val, 10, &ring_num)) {
		device_printf(GET_DEV(accel_dev), "Can't get ring number\n");
		return EFAULT;
	}
	if (ring_num >= num_rings_per_bank) {
		device_printf(GET_DEV(accel_dev), "Invalid ring number\n");
		return EFAULT;
	}

	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		device_printf(GET_DEV(accel_dev),
			      "Ring %d, %s already exists.\n",
			      ring_num,
			      ring_name);
		return EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->max_inflights =
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
	ring->head = 0;
	ring->tail = 0;
	ring->csr_tail_offset = 0;
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	adf_update_ring_arb(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		device_printf(GET_DEV(accel_dev),
			      "Couldn't add ring debugfs entry\n");
		ret = EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && !poll_mode)
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	adf_update_ring_arb(ring);
	return ret;
}

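/*
 * Usage sketch (illustrative): services normally create a tx/rx ring pair
 * on one bank, with responses on the rx ring delivered through "callback".
 * The section name "SSL", the key names and the message sizes below are
 * hypothetical configuration values, not driver constants.
 *
 *	struct adf_etr_ring_data *tx_ring, *rx_ring;
 *
 *	if (adf_create_ring(accel_dev, "SSL", 0, 64, 128,
 *			    "RingSymTx", NULL, 0, &tx_ring))
 *		return EFAULT;
 *	if (adf_create_ring(accel_dev, "SSL", 0, 64, 32,
 *			    "RingSymRx", sym_resp_cb, 0, &rx_ring)) {
 *		adf_remove_ring(tx_ring);
 *		return EFAULT;
 *	}
 */
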
void
adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear the ring's config and base CSRs */
	csr_ops->write_csr_ring_config(bank->csr_addr,
				       bank->bank_number,
				       ring->ring_number,
				       0);
	csr_ops->write_csr_ring_base(bank->csr_addr,
				     bank->bank_number,
				     ring->ring_number,
				     0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	adf_update_ring_arb(ring);
	adf_cleanup_ring(ring);
}

static void
adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 empty_rings, i;

	empty_rings =
	    csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for (i = 0; i < num_rings_per_bank; ++i) {
		if (empty_rings & (1 << i))
			adf_handle_response(&bank->rings[i], 0);
	}
}

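/*
 * Bank-level response dispatch: called with the bank pointer passed as a
 * uintptr_t cookie (typically from the bank's interrupt handler).  Drains
 * every ring that has data pending, then rearms the interrupt flag and
 * coalescing control CSR.
 */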
void
adf_response_handler(uintptr_t bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Handle all the responses and re-enable IRQs */
	adf_ring_response_handler(bank);
	csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
					    bank->bank_number,
					    bank->irq_mask);
}

static inline int
adf_get_cfg_int(struct adf_accel_dev *accel_dev,
		const char *section,
		const char *format,
		u32 key,
		u32 *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return EFAULT;

	if (compat_strtouint(val_buf, 10, value))
		return EFAULT;
	return 0;
}

static void
adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
		      const char *section,
		      u32 bank_num_in_accel)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 coalesc_timer = ADF_COALESCING_DEF_TIME;

	adf_get_cfg_int(accel_dev,
			section,
			ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			bank_num_in_accel,
			&coalesc_timer);

	if (hw_data->get_clock_speed)
		bank->irq_coalesc_timer =
		    (coalesc_timer *
		     (hw_data->get_clock_speed(hw_data) / USEC_PER_SEC)) /
		    NSEC_PER_USEC;
	else
		bank->irq_coalesc_timer = coalesc_timer;

	if (bank->irq_coalesc_timer > ADF_COALESCING_MAX_TIME)
		bank->irq_coalesc_timer = ADF_COALESCING_MAX_TIME;
	else if (bank->irq_coalesc_timer < ADF_COALESCING_MIN_TIME)
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
}

static int
adf_init_bank(struct adf_accel_dev *accel_dev,
	      struct adf_etr_bank_data *bank,
	      u32 bank_num,
	      struct resource *csr_addr)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
	struct adf_etr_ring_data *ring;
	struct adf_etr_ring_data *tx_ring;
	u32 i, coalesc_enabled = 0;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 irq_mask = BIT(num_rings_per_bank) - 1;
	u32 size = 0;

	explicit_bzero(bank, sizeof(*bank));
	bank->bank_number = bank_num;
	bank->csr_addr = csr_addr;
	bank->accel_dev = accel_dev;
	mtx_init(&bank->lock, "adf bank", NULL, MTX_DEF);

	/* Allocate the rings in the bank */
	size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
	bank->rings = kzalloc_node(size,
				   M_WAITOK | M_ZERO,
				   dev_to_node(GET_DEV(accel_dev)));

	/* Always enable IRQ coalescing so the optimised flag-and-coalesce
	 * register can be used.  If coalescing is disabled in the config
	 * file, just use the minimum time value.
	 */
	if ((adf_get_cfg_int(accel_dev,
			     "Accelerator0",
			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
			     bank_num,
			     &coalesc_enabled) == 0) &&
	    coalesc_enabled)
		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
	else
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

	for (i = 0; i < num_rings_per_bank; i++) {
		csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
		csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			ring->inflights =
			    kzalloc_node(sizeof(atomic_t),
					 M_WAITOK | M_ZERO,
					 dev_to_node(GET_DEV(accel_dev)));
		} else {
			if (i < hw_data->tx_rx_gap) {
				device_printf(GET_DEV(accel_dev),
					      "Invalid tx rings mask config\n");
				goto err;
			}
			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
			ring->inflights = tx_ring->inflights;
		}
	}

	if (adf_bank_debugfs_add(bank)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to add bank debugfs entry\n");
		goto err;
	}

	csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
	csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
	return 0;
err:
	for (i = 0; i < num_rings_per_bank; i++) {
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			kfree(ring->inflights);
			ring->inflights = NULL;
		}
	}
	kfree(bank->rings);
	return ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communications channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *csr_addr;
	u32 size;
	u32 num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data),
				M_WAITOK | M_ZERO,
				dev_to_node(GET_DEV(accel_dev)));

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size,
				       M_WAITOK | M_ZERO,
				       dev_to_node(GET_DEV(accel_dev)));

	accel_dev->transport = etr_data;
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	etr_data->debug =
	    SYSCTL_ADD_NODE(&accel_dev->sysctl_ctx,
			    SYSCTL_CHILDREN(
				device_get_sysctl_tree(GET_DEV(accel_dev))),
			    OID_AUTO,
			    "transport",
			    CTLFLAG_RD,
			    NULL,
			    "Transport parameters");
	if (!etr_data->debug) {
		device_printf(GET_DEV(accel_dev),
			      "Unable to create transport debugfs entry\n");
		ret = ENOENT;
		goto err_bank_all;
	}

	for (i = 0; i < num_banks; i++) {
		ret =
		    adf_init_bank(accel_dev, &etr_data->banks[i], i, csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	kfree(etr_data->banks);
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}

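/*
 * Usage sketch (illustrative): a device-specific attach path would bring
 * the transport up once the BARs are mapped, and tear it down on detach
 * or on a failed attach.
 *
 *	if (adf_init_etr_data(accel_dev) != 0)
 *		goto out_err;
 *	...
 *	adf_cleanup_etr_data(accel_dev);
 */
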
static void
cleanup_bank(struct adf_etr_bank_data *bank)
{
	u32 i;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;

	for (i = 0; i < num_rings_per_bank; i++) {
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		if (hw_data->tx_rings_mask & (1 << i)) {
			kfree(ring->inflights);
			ring->inflights = NULL;
		}
	}
	kfree(bank->rings);
	adf_bank_debugfs_rm(bank);
	mtx_destroy(&bank->lock);
	explicit_bzero(bank, sizeof(*bank));
}

static void
adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	u32 i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}
771