xref: /freebsd/sys/dev/qat/qat_common/adf_transport.c (revision 71625ec9ad2a9bc8c09784fbd23b759830e0ee5f)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <linux/delay.h>

#define QAT_RING_ALIGNMENT 64

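/*
 * Fast modulo for power-of-two sizes: returns data % (1 << shift)
 * without a division.
 */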
static inline u32
adf_modulo(u32 data, u32 shift)
{
	u32 div = data >> shift;
	u32 mult = div << shift;

	return data - mult;
}

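/*
 * The device requires the ring base address to be naturally aligned to
 * the ring size (a power of two), so a simple mask test suffices.
 */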
static inline int
adf_check_ring_alignment(u64 addr, u64 size)
{
	if (((size - 1) & addr) != 0)
		return EFAULT;
	return 0;
}

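/*
 * Map a (message size, message count) pair to the ring size enum whose
 * byte capacity matches exactly; fall back to the default ring size.
 */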
static int
adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
	int i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return ADF_DEFAULT_RING_SIZE;
}

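/* Claim a ring slot in the bank; fails if the ring is already in use. */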
static int
adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		mtx_unlock(&bank->lock);
		return EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	mtx_unlock(&bank->lock);
	return 0;
}

static void
adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	mtx_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	mtx_unlock(&bank->lock);
}

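/*
 * Add the ring to the bank's interrupt mask and program the coalescing
 * control so completions on this ring raise (coalesced) interrupts.
 */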
static void
adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
	u32 enable_int_col_mask = 0;

	if (csr_ops->get_int_col_ctl_enable_mask)
		enable_int_col_mask = csr_ops->get_int_col_ctl_enable_mask();

	mtx_lock(&bank->lock);
	bank->irq_mask |= (1 << ring);
	mtx_unlock(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr,
				      bank->bank_number,
				      bank->irq_mask);
	csr_ops->write_csr_int_col_ctl(bank->csr_addr,
				       bank->bank_number,
				       bank->irq_coalesc_timer |
					   enable_int_col_mask);
}

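/* Remove the ring from the bank's interrupt mask. */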
static void
adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	mtx_lock(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	mtx_unlock(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr,
				      bank->bank_number,
				      bank->irq_mask);
}

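/*
 * Copy a request onto the ring at the current tail and notify the
 * device by writing the new tail to the ring tail CSR. Returns EAGAIN
 * when the ring already holds max_inflights outstanding requests.
 */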
int
adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 msg_size = 0;

	if (atomic_add_return(1, ring->inflights) > ring->max_inflights) {
		atomic_dec(ring->inflights);
		return EAGAIN;
	}

	msg_size = ADF_MSG_SIZE_TO_BYTES(ring->msg_size);
	mtx_lock(&ring->lock);
	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail),
	       msg,
	       msg_size);

	ring->tail = adf_modulo(ring->tail + msg_size,
				ADF_RING_SIZE_MODULO(ring->ring_size));

	csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number,
				     ring->tail);
	ring->csr_tail_offset = ring->tail;
	mtx_unlock(&ring->lock);
	return 0;
}

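/*
 * Drain up to quota responses from the ring head, invoking the ring
 * callback for each message and restoring the empty-ring signature.
 * Returns the number of responses processed.
 */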
int
adf_handle_response(struct adf_etr_ring_data *ring, u32 quota)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 msg_counter = 0;
	u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

	if (!quota)
		quota = ADF_NO_RESPONSE_QUOTA;

	while ((*msg != ADF_RING_EMPTY_SIG) && (msg_counter < quota)) {
		ring->callback((u32 *)msg);
		atomic_dec(ring->inflights);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head =
		    adf_modulo(ring->head +
				   ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
			       ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
	}
	if (msg_counter > 0)
		csr_ops->write_csr_ring_head(ring->bank->csr_addr,
					     ring->bank->bank_number,
					     ring->ring_number,
					     ring->head);
	return msg_counter;
}

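/*
 * Poll all non-empty rings of one bank. Returns 0 if at least one
 * response was handled, EAGAIN if all rings were empty, or EINVAL if
 * no device matches accel_id.
 */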
int
adf_poll_bank(u32 accel_id, u32 bank_num, u32 quota)
{
	int num_resp;
	struct adf_accel_dev *accel_dev;
	struct adf_etr_data *trans_data;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	struct adf_hw_csr_ops *csr_ops;
	u32 rings_not_empty;
	u32 ring_num;
	u32 resp_total = 0;
	u32 num_rings_per_bank;

	/* Find the accel device associated with the accel_id passed in. */
	accel_dev = adf_devmgr_get_dev_by_id(accel_id);
	if (!accel_dev) {
		pr_err("There is no device with id: %d\n", accel_id);
		return EINVAL;
	}

	csr_ops = GET_CSR_OPS(accel_dev);
	trans_data = accel_dev->transport;
	bank = &trans_data->banks[bank_num];
	mtx_lock(&bank->lock);

	/* Read the ring status CSR to determine which rings are empty. */
	rings_not_empty =
	    csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number);
	/* Complement to find which rings have data to be processed. */
	rings_not_empty = (~rings_not_empty) & bank->ring_mask;

	/* Return EAGAIN if all of the bank's polling rings are empty. */
	if (!(rings_not_empty & bank->ring_mask)) {
		mtx_unlock(&bank->lock);
		return EAGAIN;
	}

	/*
	 * Loop over all rings within this bank. The ring structure is
	 * global to all rings, so while looping over the rings in the
	 * bank use ring_number to index the global ring.
	 */
	num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;
	for (ring_num = 0; ring_num < num_rings_per_bank; ring_num++) {
		ring = &bank->rings[ring_num];

		/*
		 * AND with the polling ring mask. If there is no data
		 * on this ring, move to the next one.
		 */
		if (!(rings_not_empty & (1 << ring->ring_number)))
			continue;

		/* Poll the ring. */
		num_resp = adf_handle_response(ring, quota);
		resp_total += num_resp;
	}

	mtx_unlock(&bank->lock);
	/* Return success if any response message was returned. */
	if (resp_total)
		return 0;
	return EAGAIN;
}

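/*
 * Poll every bank of the device that has polling rings configured.
 * Returns 0 if any bank produced responses, EAGAIN otherwise.
 */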
int
adf_poll_all_banks(u32 accel_id, u32 quota)
{
	int status = EAGAIN;
	struct adf_accel_dev *accel_dev;
	struct adf_etr_data *trans_data;
	struct adf_etr_bank_data *bank;
	u32 bank_num;
	u32 stat_total = 0;

	/* Find the accel device associated with the accel_id passed in. */
	accel_dev = adf_devmgr_get_dev_by_id(accel_id);
	if (!accel_dev) {
		pr_err("There is no device with id: %d\n", accel_id);
		return EINVAL;
	}

	/* Loop over the banks and call adf_poll_bank on each. */
	trans_data = accel_dev->transport;
	for (bank_num = 0; bank_num < GET_MAX_BANKS(accel_dev); bank_num++) {
		bank = &trans_data->banks[bank_num];
		/*
		 * If there are no polling rings on this bank, continue
		 * to the next bank number.
		 */
		if (bank->ring_mask == 0)
			continue;
		status = adf_poll_bank(accel_id, bank_num, quota);
		/* A successful status is either 0 or EAGAIN. */
		if (status == 0)
			stat_total++;
		else if (status != EAGAIN)
			return status;
	}

	/*
	 * Return success if adf_poll_bank succeeded at any stage;
	 * hard failures were already returned above.
	 */
	if (stat_total)
		return 0;

	return EAGAIN;
}

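/* Program the ring config CSR for a request (TX) ring. */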
static void
adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number,
				       ring_config);
}

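/*
 * Program the ring config CSR for a response (RX) ring, including its
 * near-watermark settings.
 */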
static void
adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size,
						 ADF_RING_NEAR_WATERMARK_512,
						 ADF_RING_NEAR_WATERMARK_0);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number,
				       ring_config);
}

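/*
 * Allocate the DMA memory backing a ring, verify its alignment, program
 * the ring config and base CSRs, and initialize the ring lock.
 */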
static int
adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u64 ring_base;
	u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	int ret;

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
	ret = bus_dma_mem_create(&ring->dma_mem,
				 accel_dev->dma_tag,
				 ring_size_bytes,
				 BUS_SPACE_MAXADDR,
				 ring_size_bytes,
				 M_WAITOK | M_ZERO);
	if (ret)
		return ret;
	ring->base_addr = ring->dma_mem.dma_vaddr;
	ring->dma_addr = ring->dma_mem.dma_baddr;

	/* Fill the ring with the empty-message signature pattern. */
	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The base_addr has to be aligned to the size of the buffer. */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		device_printf(GET_DEV(accel_dev), "Ring address not aligned\n");
		bus_dma_mem_free(&ring->dma_mem);
		ring->base_addr = NULL;
		return EFAULT;
	}

	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);
	else
		adf_configure_rx_ring(ring);

	ring_base =
	    csr_ops->build_csr_ring_base_addr(ring->dma_addr, ring->ring_size);
	csr_ops->write_csr_ring_base(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number,
				     ring_base);
	mtx_init(&ring->lock, "adf bank", NULL, MTX_DEF);
	return 0;
}

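/* Scrub and free the ring's DMA memory and destroy its lock. */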
static void
adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		explicit_bzero(ring->base_addr, ring_size_bytes);
		bus_dma_mem_free(&ring->dma_mem);
	}
	mtx_destroy(&ring->lock);
}

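/*
 * Create a ring: look up its ring number in the configuration section,
 * reserve the slot in the bank, size and initialize the ring, then
 * enable arbitration and, unless the ring is polled, its interrupt.
 */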
int
adf_create_ring(struct adf_accel_dev *accel_dev,
		const char *section,
		u32 bank_num,
		u32 num_msgs,
		u32 msg_size,
		const char *ring_name,
		adf_callback_fn callback,
		int poll_mode,
		struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 ring_num;
	int ret;
	u8 num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Invalid bank number\n");
		return EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		device_printf(GET_DEV(accel_dev), "Invalid msg size\n");
		return EFAULT;
	}
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid ring size for given msg size\n");
		return EFAULT;
	}
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		device_printf(GET_DEV(accel_dev),
			      "Section %s, no such entry : %s\n",
			      section,
			      ring_name);
		return EFAULT;
	}
	if (compat_strtouint(val, 10, &ring_num)) {
		device_printf(GET_DEV(accel_dev), "Can't get ring number\n");
		return EFAULT;
	}
	if (ring_num >= num_rings_per_bank) {
		device_printf(GET_DEV(accel_dev), "Invalid ring number\n");
		return EFAULT;
	}

	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		device_printf(GET_DEV(accel_dev),
			      "Ring %d, %s already exists.\n",
			      ring_num,
			      ring_name);
		return EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->max_inflights =
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
	ring->head = 0;
	ring->tail = 0;
	ring->csr_tail_offset = 0;
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	adf_update_ring_arb(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		device_printf(GET_DEV(accel_dev),
			      "Couldn't add ring debugfs entry\n");
		ret = EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && !poll_mode)
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	adf_update_ring_arb(ring);
	return ret;
}

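/* Tear down a ring and release all resources associated with it. */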
void
adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear the ring config and base CSRs */
	csr_ops->write_csr_ring_config(bank->csr_addr,
				       bank->bank_number,
				       ring->ring_number,
				       0);
	csr_ops->write_csr_ring_base(bank->csr_addr,
				     bank->bank_number,
				     ring->ring_number,
				     0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	adf_update_ring_arb(ring);
	adf_cleanup_ring(ring);
}

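/*
 * Service every interrupt-enabled ring of the bank that has responses
 * pending, as indicated by the empty-stat CSR.
 */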
static void
adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 empty_rings, i;

	empty_rings =
	    csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for (i = 0; i < num_rings_per_bank; ++i) {
		if (empty_rings & (1 << i))
			adf_handle_response(&bank->rings[i], 0);
	}
}

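/* Bank interrupt handler, invoked with the bank pointer as bank_addr. */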
void
adf_response_handler(uintptr_t bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Handle all the responses and re-enable IRQs */
	adf_ring_response_handler(bank);
	csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
					    bank->bank_number,
					    bank->irq_mask);
}

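/*
 * Read an unsigned integer from the device configuration, building the
 * key name from format and key (e.g. a bank number).
 */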
static inline int
adf_get_cfg_int(struct adf_accel_dev *accel_dev,
		const char *section,
		const char *format,
		u32 key,
		u32 *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return EFAULT;

	if (compat_strtouint(val_buf, 10, value))
		return EFAULT;
	return 0;
}

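/*
 * Fetch the configured interrupt coalescing timer for the bank, scale
 * it by the device clock when available, and clamp it to the supported
 * range.
 */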
static void
adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
		      const char *section,
		      u32 bank_num_in_accel)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 coalesc_timer = ADF_COALESCING_DEF_TIME;

	adf_get_cfg_int(accel_dev,
			section,
			ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			bank_num_in_accel,
			&coalesc_timer);

	if (hw_data->get_clock_speed)
		bank->irq_coalesc_timer =
		    (coalesc_timer *
		     (hw_data->get_clock_speed(hw_data) / USEC_PER_SEC)) /
		    NSEC_PER_USEC;
	else
		bank->irq_coalesc_timer = coalesc_timer;

	if (bank->irq_coalesc_timer > ADF_COALESCING_MAX_TIME)
		bank->irq_coalesc_timer = ADF_COALESCING_MAX_TIME;
	else if (bank->irq_coalesc_timer < ADF_COALESCING_MIN_TIME)
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
}

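/*
 * Initialize one bank: allocate its ring array, set up the coalescing
 * timer, reset all ring CSRs, and wire each RX ring's inflight counter
 * to its TX partner ring.
 */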
static int
adf_init_bank(struct adf_accel_dev *accel_dev,
	      struct adf_etr_bank_data *bank,
	      u32 bank_num,
	      struct resource *csr_addr)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
	struct adf_etr_ring_data *ring;
	struct adf_etr_ring_data *tx_ring;
	u32 i, coalesc_enabled = 0;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 irq_mask = BIT(num_rings_per_bank) - 1;
	u32 size = 0;

	explicit_bzero(bank, sizeof(*bank));
	bank->bank_number = bank_num;
	bank->csr_addr = csr_addr;
	bank->accel_dev = accel_dev;
	mtx_init(&bank->lock, "adf bank", NULL, MTX_DEF);

	/* Allocate the rings in the bank */
	size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
	bank->rings = kzalloc_node(size,
				   M_WAITOK | M_ZERO,
				   dev_to_node(GET_DEV(accel_dev)));

	/*
	 * Always enable IRQ coalescing, which allows the optimised
	 * flag-and-coalesce register to be used. If coalescing is
	 * disabled in the config file, just use the minimum time value.
	 */
	if ((adf_get_cfg_int(accel_dev,
			     "Accelerator0",
			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
			     bank_num,
			     &coalesc_enabled) == 0) &&
	    coalesc_enabled)
		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
	else
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

	for (i = 0; i < num_rings_per_bank; i++) {
		csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
		csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			ring->inflights =
			    kzalloc_node(sizeof(atomic_t),
					 M_WAITOK | M_ZERO,
					 dev_to_node(GET_DEV(accel_dev)));
		} else {
			if (i < hw_data->tx_rx_gap) {
				device_printf(GET_DEV(accel_dev),
					      "Invalid tx rings mask config\n");
				goto err;
			}
			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
			ring->inflights = tx_ring->inflights;
		}
	}

	if (adf_bank_debugfs_add(bank)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to add bank debugfs entry\n");
		goto err;
	}

	csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
	csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
	return 0;
err:
	for (i = 0; i < num_rings_per_bank; i++) {
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			kfree(ring->inflights);
			ring->inflights = NULL;
		}
	}
	kfree(bank->rings);
	return ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *csr_addr;
	u32 size;
	u32 num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data),
				M_WAITOK | M_ZERO,
				dev_to_node(GET_DEV(accel_dev)));

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size,
				       M_WAITOK | M_ZERO,
				       dev_to_node(GET_DEV(accel_dev)));

	accel_dev->transport = etr_data;
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	etr_data->debug =
	    SYSCTL_ADD_NODE(&accel_dev->sysctl_ctx,
			    SYSCTL_CHILDREN(
				device_get_sysctl_tree(GET_DEV(accel_dev))),
			    OID_AUTO,
			    "transport",
			    CTLFLAG_RD,
			    NULL,
			    "Transport parameters");
	if (!etr_data->debug) {
		device_printf(GET_DEV(accel_dev),
			      "Unable to create transport debugfs entry\n");
		ret = ENOENT;
		goto err_bank_all;
	}

	for (i = 0; i < num_banks; i++) {
		ret =
		    adf_init_bank(accel_dev, &etr_data->banks[i], i, csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	kfree(etr_data->banks);
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}

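/* Free all rings of a bank along with its debugfs entry and lock. */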
static void
cleanup_bank(struct adf_etr_bank_data *bank)
{
	u32 i;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;

	for (i = 0; i < num_rings_per_bank; i++) {
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		if (hw_data->tx_rings_mask & (1 << i)) {
			kfree(ring->inflights);
			ring->inflights = NULL;
		}
	}
	kfree(bank->rings);
	adf_bank_debugfs_rm(bank);
	mtx_destroy(&bank->lock);
	explicit_bzero(bank, sizeof(*bank));
}

static void
adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	u32 i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}