xref: /freebsd/sys/dev/qat/qat_common/adf_transport.c (revision 70bc3f4331a1b6e7045ae5326cbe03428503b612)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <linux/delay.h>

#define QAT_RING_ALIGNMENT 64

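/* Return data modulo 2^shift (ring byte sizes are powers of two). */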
static inline u32
adf_modulo(u32 data, u32 shift)
{
	u32 div = data >> shift;
	u32 mult = div << shift;

	return data - mult;
}

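/*
 * A ring's DMA address must be naturally aligned to its size, which is
 * always a power of two.
 */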
static inline int
adf_check_ring_alignment(u64 addr, u64 size)
{
	if (((size - 1) & addr) != 0)
		return EFAULT;
	return 0;
}

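/*
 * Find the ring size (in the hardware encoding) that exactly holds
 * msg_num messages of msg_size bytes; fall back to the default size.
 */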
static int
adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
	int i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return ADF_DEFAULT_RING_SIZE;
}

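/* Claim a ring number in the bank's ring mask; EFAULT if already in use. */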
static int
adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		mtx_unlock(&bank->lock);
		return EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	mtx_unlock(&bank->lock);
	return 0;
}

static void
adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	mtx_unlock(&bank->lock);
}

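/*
 * Add the ring to the bank's interrupt mask and program the interrupt
 * coalescing control registers.
 */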
static void
adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
	u32 enable_int_col_mask = 0;

	if (csr_ops->get_int_col_ctl_enable_mask)
		enable_int_col_mask = csr_ops->get_int_col_ctl_enable_mask();

	mtx_lock(&bank->lock);
	bank->irq_mask |= (1 << ring);
	mtx_unlock(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr,
				      bank->bank_number,
				      bank->irq_mask);
	csr_ops->write_csr_int_col_ctl(bank->csr_addr,
				       bank->bank_number,
				       bank->irq_coalesc_timer |
					   enable_int_col_mask);
}

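/* Remove the ring from the bank's interrupt mask. */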
static void
adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	mtx_lock(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	mtx_unlock(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr,
				      bank->bank_number,
				      bank->irq_mask);
}

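/*
 * Copy a request message to the ring tail and advance the tail CSR.
 * Returns EAGAIN if the ring is already full.
 */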
int
adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 msg_size = 0;

	if (atomic_add_return(1, ring->inflights) > ring->max_inflights) {
		atomic_dec(ring->inflights);
		return EAGAIN;
	}

	msg_size = ADF_MSG_SIZE_TO_BYTES(ring->msg_size);
	mtx_lock(&ring->lock);
	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail),
	       msg,
	       msg_size);

	ring->tail = adf_modulo(ring->tail + msg_size,
				ADF_RING_SIZE_MODULO(ring->ring_size));

	csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number,
				     ring->tail);
	ring->csr_tail_offset = ring->tail;
	mtx_unlock(&ring->lock);
	return 0;
}

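/*
 * Drain up to quota response messages from the ring head, invoking the
 * ring callback for each one. Returns the number of messages processed.
 */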
int
adf_handle_response(struct adf_etr_ring_data *ring, u32 quota)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 msg_counter = 0;
	u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

	if (!quota)
		quota = ADF_NO_RESPONSE_QUOTA;

	while ((*msg != ADF_RING_EMPTY_SIG) && (msg_counter < quota)) {
		ring->callback((u32 *)msg);
		atomic_dec(ring->inflights);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head =
		    adf_modulo(ring->head +
				   ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
			       ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
	}
	if (msg_counter > 0)
		csr_ops->write_csr_ring_head(ring->bank->csr_addr,
					     ring->bank->bank_number,
					     ring->ring_number,
					     ring->head);
	return msg_counter;
}

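/*
 * Poll every non-empty ring in a bank. Returns 0 if any responses were
 * processed, EAGAIN if all of the bank's rings were empty.
 */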
int
adf_poll_bank(u32 accel_id, u32 bank_num, u32 quota)
{
	int num_resp;
	struct adf_accel_dev *accel_dev;
	struct adf_etr_data *trans_data;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	struct adf_hw_csr_ops *csr_ops;
	u32 rings_not_empty;
	u32 ring_num;
	u32 resp_total = 0;
	u32 num_rings_per_bank;

	/* Find the accel device associated with the accel_id passed in. */
	accel_dev = adf_devmgr_get_dev_by_id(accel_id);
	if (!accel_dev) {
		pr_err("There is no device with id: %d\n", accel_id);
		return EINVAL;
	}

	csr_ops = GET_CSR_OPS(accel_dev);
	trans_data = accel_dev->transport;
	bank = &trans_data->banks[bank_num];
	mtx_lock(&bank->lock);

	/* Read the ring status CSR to determine which rings are empty. */
	rings_not_empty =
	    csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number);
	/* Complement to find which rings have data to be processed. */
	rings_not_empty = (~rings_not_empty) & bank->ring_mask;

	/* Return EAGAIN if all of the bank's polling rings are empty. */
	if (!(rings_not_empty & bank->ring_mask)) {
		mtx_unlock(&bank->lock);
		return EAGAIN;
	}

	/*
	 * Loop over all rings within this bank. Ring numbering is global
	 * across the bank's status register, so use ring->ring_number when
	 * testing the not-empty mask.
	 */
	num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;
	for (ring_num = 0; ring_num < num_rings_per_bank; ring_num++) {
		ring = &bank->rings[ring_num];

		/* If there is no data on this ring, move to the next one. */
		if (!(rings_not_empty & (1 << ring->ring_number)))
			continue;

		/* Poll the ring. */
		num_resp = adf_handle_response(ring, quota);
		resp_total += num_resp;
	}

	mtx_unlock(&bank->lock);
	/* Return 0 (success) if any response messages were processed. */
	if (resp_total)
		return 0;
	return EAGAIN;
}

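/*
 * Poll every bank that has polling rings configured. Returns 0 if any
 * bank returned responses, EAGAIN otherwise.
 */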
int
adf_poll_all_banks(u32 accel_id, u32 quota)
{
	int status = EAGAIN;
	struct adf_accel_dev *accel_dev;
	struct adf_etr_data *trans_data;
	struct adf_etr_bank_data *bank;
	u32 bank_num;
	u32 stat_total = 0;

	/* Find the accel device associated with the accel_id passed in. */
	accel_dev = adf_devmgr_get_dev_by_id(accel_id);
	if (!accel_dev) {
		pr_err("There is no device with id: %d\n", accel_id);
		return EINVAL;
	}

	/* Loop over banks and call adf_poll_bank */
	trans_data = accel_dev->transport;
	for (bank_num = 0; bank_num < GET_MAX_BANKS(accel_dev); bank_num++) {
		bank = &trans_data->banks[bank_num];
		/*
		 * If there are no polling rings on this bank, continue to
		 * the next bank number.
		 */
		if (bank->ring_mask == 0)
			continue;
		status = adf_poll_bank(accel_id, bank_num, quota);
		/* A successful status is either 0 or EAGAIN. */
		if (status == 0)
			stat_total++;
		else if (status != EAGAIN)
			return status;
	}

	/*
	 * Return 0 (success) if adf_poll_bank returned success for any
	 * bank; any hard failure has already been returned above.
	 */
	if (stat_total)
		return 0;

	return EAGAIN;
}

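/* Program the ring-config CSR for a request (TX) ring. */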
static void
adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number,
				       ring_config);
}

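/* Program the ring-config CSR for a response (RX) ring, with watermarks. */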
static void
adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size,
						 ADF_RING_NEAR_WATERMARK_512,
						 ADF_RING_NEAR_WATERMARK_0);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number,
				       ring_config);
}

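/*
 * Allocate DMA memory for a ring, fill it with the empty-message
 * signature and program the ring base and config CSRs.
 */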
static int
adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u64 ring_base;
	u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	int ret;

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	ret = bus_dma_mem_create(&ring->dma_mem,
				 accel_dev->dma_tag,
				 ring_size_bytes,
				 BUS_SPACE_MAXADDR,
				 ring_size_bytes,
				 M_WAITOK | M_ZERO);
	if (ret)
		return ret;
	ring->base_addr = ring->dma_mem.dma_vaddr;
	ring->dma_addr = ring->dma_mem.dma_baddr;

	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The ring's DMA address has to be aligned to the size of the buffer */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		device_printf(GET_DEV(accel_dev), "Ring address not aligned\n");
		bus_dma_mem_free(&ring->dma_mem);
		ring->base_addr = NULL;
		return EFAULT;
	}

	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);
	else
		adf_configure_rx_ring(ring);

	ring_base =
	    csr_ops->build_csr_ring_base_addr(ring->dma_addr, ring->ring_size);
	csr_ops->write_csr_ring_base(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number,
				     ring_base);
	mtx_init(&ring->lock, "adf bank", NULL, MTX_DEF);
	return 0;
}

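/* Zero and free a ring's DMA memory and destroy its lock. */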
static void
adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		explicit_bzero(ring->base_addr, ring_size_bytes);
		bus_dma_mem_free(&ring->dma_mem);
	}
	mtx_destroy(&ring->lock);
}

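/*
 * Create a ring: look up its ring number in the device configuration,
 * reserve it within the bank, allocate and initialize its memory and
 * CSRs, then enable arbitration and, unless polled, its interrupt.
 */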
int
adf_create_ring(struct adf_accel_dev *accel_dev,
		const char *section,
		u32 bank_num,
		u32 num_msgs,
		u32 msg_size,
		const char *ring_name,
		adf_callback_fn callback,
		int poll_mode,
		struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 ring_num;
	int ret;
	u8 num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Invalid bank number\n");
		return EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		device_printf(GET_DEV(accel_dev), "Invalid msg size\n");
		return EFAULT;
	}
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid ring size for given msg size\n");
		return EFAULT;
	}
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		device_printf(GET_DEV(accel_dev),
			      "Section %s, no such entry : %s\n",
			      section,
			      ring_name);
		return EFAULT;
	}
	if (compat_strtouint(val, 10, &ring_num)) {
		device_printf(GET_DEV(accel_dev), "Can't get ring number\n");
		return EFAULT;
	}
	if (ring_num >= num_rings_per_bank) {
		device_printf(GET_DEV(accel_dev), "Invalid ring number\n");
		return EFAULT;
	}

	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		device_printf(GET_DEV(accel_dev),
			      "Ring %d, %s already exists.\n",
			      ring_num,
			      ring_name);
		return EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->max_inflights =
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
	ring->head = 0;
	ring->tail = 0;
	ring->csr_tail_offset = 0;
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	adf_update_ring_arb(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		device_printf(GET_DEV(accel_dev),
			      "Couldn't add ring debugfs entry\n");
		ret = EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && !poll_mode)
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	adf_update_ring_arb(ring);
	return ret;
}

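/* Tear down a ring: disable its interrupt, clear its CSRs and free it. */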
void
adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear the ring's configuration and base registers */
	csr_ops->write_csr_ring_config(bank->csr_addr,
				       bank->bank_number,
				       ring->ring_number,
				       0);
	csr_ops->write_csr_ring_base(bank->csr_addr,
				     bank->bank_number,
				     ring->ring_number,
				     0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	adf_update_ring_arb(ring);
	adf_cleanup_ring(ring);
}

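/* Handle responses on every non-empty ring in the bank's interrupt mask. */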
static void
adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 empty_rings, i;

	empty_rings =
	    csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for (i = 0; i < num_rings_per_bank; ++i) {
		if (empty_rings & (1 << i))
			adf_handle_response(&bank->rings[i], 0);
	}
}

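/*
 * Called with a bank pointer cast to uintptr_t: handle all of the bank's
 * responses and re-enable its IRQs.
 */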
void
adf_response_handler(uintptr_t bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Handle all the responses and re-enable IRQs */
	adf_ring_response_handler(bank);
	csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
					    bank->bank_number,
					    bank->irq_mask);
}

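/*
 * Build a configuration key name from the given format string and key id,
 * then read its value as an unsigned integer.
 */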
static inline int
adf_get_cfg_int(struct adf_accel_dev *accel_dev,
		const char *section,
		const char *format,
		u32 key,
		u32 *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return EFAULT;

	if (compat_strtouint(val_buf, 10, value))
		return EFAULT;
	return 0;
}

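/*
 * Read the interrupt coalescing timer for a bank from the configuration,
 * scale it by the device clock if available and clamp it to the supported
 * range.
 */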
static void
adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
		      const char *section,
		      u32 bank_num_in_accel)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 coalesc_timer = ADF_COALESCING_DEF_TIME;

	adf_get_cfg_int(accel_dev,
			section,
			ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			bank_num_in_accel,
			&coalesc_timer);

	if (hw_data->get_clock_speed)
		bank->irq_coalesc_timer =
		    (coalesc_timer *
		     (hw_data->get_clock_speed(hw_data) / USEC_PER_SEC)) /
		    NSEC_PER_USEC;
	else
		bank->irq_coalesc_timer = coalesc_timer;

	if (bank->irq_coalesc_timer > ADF_COALESCING_MAX_TIME)
		bank->irq_coalesc_timer = ADF_COALESCING_MAX_TIME;
	else if (bank->irq_coalesc_timer < ADF_COALESCING_MIN_TIME)
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
}

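/*
 * Initialize a bank: allocate its ring array, configure interrupt
 * coalescing, reset every ring CSR and share each TX ring's in-flight
 * counter with its paired RX ring.
 */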
static int
adf_init_bank(struct adf_accel_dev *accel_dev,
	      struct adf_etr_bank_data *bank,
	      u32 bank_num,
	      struct resource *csr_addr)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
	struct adf_etr_ring_data *ring;
	struct adf_etr_ring_data *tx_ring;
	u32 i, coalesc_enabled = 0;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 irq_mask = BIT(num_rings_per_bank) - 1;
	u32 size = 0;

	explicit_bzero(bank, sizeof(*bank));
	bank->bank_number = bank_num;
	bank->csr_addr = csr_addr;
	bank->accel_dev = accel_dev;
	mtx_init(&bank->lock, "adf bank", NULL, MTX_DEF);

	/* Allocate the rings in the bank */
	size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
	bank->rings = kzalloc_node(size,
				   M_WAITOK | M_ZERO,
				   dev_to_node(GET_DEV(accel_dev)));

	/*
	 * Always enable IRQ coalescing so that the optimised flag and
	 * coalesce register can be used. If coalescing is disabled in the
	 * config file, just use the minimum time value.
	 */
	if ((adf_get_cfg_int(accel_dev,
			     "Accelerator0",
			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
			     bank_num,
			     &coalesc_enabled) == 0) &&
	    coalesc_enabled)
		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
	else
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

	for (i = 0; i < num_rings_per_bank; i++) {
		csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
		csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			ring->inflights =
			    kzalloc_node(sizeof(atomic_t),
					 M_WAITOK | M_ZERO,
					 dev_to_node(GET_DEV(accel_dev)));
		} else {
			if (i < hw_data->tx_rx_gap) {
				device_printf(GET_DEV(accel_dev),
					      "Invalid tx rings mask config\n");
				goto err;
			}
			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
			ring->inflights = tx_ring->inflights;
		}
	}

	if (adf_bank_debugfs_add(bank)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to add bank debugfs entry\n");
		goto err;
	}

	csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
	csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
	return 0;
err:
	for (i = 0; i < num_rings_per_bank; i++) {
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			kfree(ring->inflights);
			ring->inflights = NULL;
		}
	}
	kfree(bank->rings);
	return ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *csr_addr;
	u32 size;
	u32 num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data),
				M_WAITOK | M_ZERO,
				dev_to_node(GET_DEV(accel_dev)));

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size,
				       M_WAITOK | M_ZERO,
				       dev_to_node(GET_DEV(accel_dev)));

	accel_dev->transport = etr_data;
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	etr_data->debug =
	    SYSCTL_ADD_NODE(&accel_dev->sysctl_ctx,
			    SYSCTL_CHILDREN(
				device_get_sysctl_tree(GET_DEV(accel_dev))),
			    OID_AUTO,
			    "transport",
			    CTLFLAG_RD,
			    NULL,
			    "Transport parameters");
	if (!etr_data->debug) {
		device_printf(GET_DEV(accel_dev),
			      "Unable to create transport debugfs entry\n");
		ret = ENOENT;
		goto err_bank_all;
	}

	for (i = 0; i < num_banks; i++) {
		ret =
		    adf_init_bank(accel_dev, &etr_data->banks[i], i, csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	kfree(etr_data->banks);
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}

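/* Free every ring in a bank along with the bank's own resources. */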
static void
cleanup_bank(struct adf_etr_bank_data *bank)
{
	u32 i;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;

	for (i = 0; i < num_rings_per_bank; i++) {
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		if (hw_data->tx_rings_mask & (1 << i)) {
			kfree(ring->inflights);
			ring->inflights = NULL;
		}
	}
	kfree(bank->rings);
	adf_bank_debugfs_rm(bank);
	mtx_destroy(&bank->lock);
	explicit_bzero(bank, sizeof(*bank));
}

static void
adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	u32 i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}
777