/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_dev_err.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/mutex.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "icp_qat_fw.h"

#if defined(QAT_UIO)
#include "adf_cfg_device.h"
#endif /* QAT_UIO */

/* Mask used to check the CompressAndVerify capability bit */
#define DC_CNV_EXTENDED_CAPABILITY (0x01)

/* Mask used to check the CompressAndVerifyAndRecover capability bit */
#define DC_CNVNR_EXTENDED_CAPABILITY (0x100)
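/*
 * Both masks are understood to select bits in the extended compression
 * capability word (hw_data->extended_dc_capabilities) that
 * adf_cfg_add_ext_params() below publishes to the configuration as
 * ADF_DC_EXTENDED_FEATURES.
 */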

static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);

static int adf_dev_init_locked(struct adf_accel_dev *accel_dev);
static int adf_dev_start_locked(struct adf_accel_dev *accel_dev);
static int adf_dev_stop_locked(struct adf_accel_dev *accel_dev);
static void adf_dev_shutdown_locked(struct adf_accel_dev *accel_dev);

static void
adf_service_add(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_add(&service->list, &service_table);
	mutex_unlock(&service_lock);
}

int
adf_service_register(struct service_hndl *service)
{
	memset(service->init_status, 0, sizeof(service->init_status));
	memset(service->start_status, 0, sizeof(service->start_status));
	adf_service_add(service);
	return 0;
}

static void
adf_service_remove(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_del(&service->list);
	mutex_unlock(&service_lock);
}

int
adf_service_unregister(struct service_hndl *service)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
		if (service->init_status[i] || service->start_status[i]) {
			pr_err("QAT: Could not remove active service [%d]\n",
			       i);
			return EFAULT;
		}
	}
	adf_service_remove(service);
	return 0;
}
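
/*
 * Illustrative, compiled-out sketch of how a subsystem hooks into the
 * service table managed above: it fills in a struct service_hndl with a
 * name and an event handler and registers it.  Only the fields used in
 * this file (list, name, event_hld, init_status, start_status) are known
 * here; the handler prototype is assumed from the call sites below.
 */
#if 0
static int
example_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event)
{
	switch (event) {
	case ADF_EVENT_INIT:
		/* Allocate per-device state; a non-zero return fails init. */
		return 0;
	case ADF_EVENT_START:
		/* Device is ready to accept requests. */
		return 0;
	case ADF_EVENT_STOP:
	case ADF_EVENT_SHUTDOWN:
		/* Quiesce and release per-device state. */
		return 0;
	default:
		return 0;
	}
}

static struct service_hndl example_service = {
	.event_hld = example_event_handler,
	.name = "example_service",
};

/* adf_service_register(&example_service); */
/* ... */
/* adf_service_unregister(&example_service); */
#endif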

static int
adf_cfg_add_device_params(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	struct adf_hw_device_data *hw_data = NULL;
	unsigned long val;

	if (!accel_dev)
		return -EINVAL;

	hw_data = accel_dev->hw_device;

	if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_MAX_BANKS);
	val = GET_MAX_BANKS(accel_dev);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_CAPABILITIES_MASK);
	val = hw_data->accel_capabilities_mask;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_PKG_ID);
	val = accel_dev->accel_id;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_NODE_ID);
	val = dev_to_node(GET_DEV(accel_dev));
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_MAX_RINGS_PER_BANK);
	val = hw_data->num_rings_per_bank;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_HW_REV_ID_KEY);
	snprintf(hw_version,
		 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
		 "%d",
		 accel_dev->accel_pci_dev.revid);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)hw_version, ADF_STR))
		goto err;

	snprintf(key, sizeof(key), ADF_MMP_VER_KEY);
	snprintf(mmp_version,
		 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
		 "%d.%d.%d",
		 accel_dev->fw_versions.mmp_version_major,
		 accel_dev->fw_versions.mmp_version_minor,
		 accel_dev->fw_versions.mmp_version_patch);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)mmp_version, ADF_STR))
		goto err;

	return 0;
err:
	device_printf(GET_DEV(accel_dev),
		      "Failed to add internal values to accel_dev cfg\n");
	return -EINVAL;
}
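
/*
 * Minimal, compiled-out sketch of reading one of the values added above
 * back out of the GENERAL section, using the same helpers this file uses
 * elsewhere (adf_cfg_get_param_value() and compat_strtouint()).  The
 * wrapper function is purely illustrative.
 */
#if 0
static unsigned int
example_read_max_banks(struct adf_accel_dev *accel_dev)
{
	char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned int max_banks = 0;

	if (!adf_cfg_get_param_value(
		accel_dev, ADF_GENERAL_SEC, ADF_DEV_MAX_BANKS, value))
		compat_strtouint(value, ADF_CFG_BASE_DEC, &max_banks);
	return max_banks;
}
#endif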

static int
adf_cfg_add_fw_version(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key, sizeof(key), ADF_UOF_VER_KEY);
	snprintf(fw_version,
		 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
		 "%d.%d.%d",
		 accel_dev->fw_versions.fw_version_major,
		 accel_dev->fw_versions.fw_version_minor,
		 accel_dev->fw_versions.fw_version_patch);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)fw_version, ADF_STR))
		return EFAULT;

	return 0;
}

static int
adf_cfg_add_ext_params(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	unsigned long val;

	snprintf(key, sizeof(key), ADF_DC_EXTENDED_FEATURES);

	val = hw_data->extended_dc_capabilities;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX))
		return -EINVAL;

	return 0;
}

void
adf_error_notifier(uintptr_t arg)
{
	struct adf_accel_dev *accel_dev = (struct adf_accel_dev *)arg;
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_ERROR))
			device_printf(GET_DEV(accel_dev),
				      "Failed to send error event to %s.\n",
				      service->name);
	}
}

/**
 * adf_set_ssm_wdtimer() - Initialize the slice hang watchdog timer.
 * @accel_dev: Pointer to acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *csr = misc_bar->virt_addr;
	u32 i;
	unsigned int mask;
	u32 clk_per_sec = hw_data->get_clock_speed(hw_data);
	u32 timer_val = ADF_WDT_TIMER_SYM_COMP_MS * (clk_per_sec / 1000);
	u32 timer_val_pke = ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE;
	char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };

	/* Get Watch Dog Timer for CySym+Comp from the configuration */
	if (!adf_cfg_get_param_value(accel_dev,
				     ADF_GENERAL_SEC,
				     ADF_DEV_SSM_WDT_BULK,
				     (char *)timer_str)) {
		if (!compat_strtouint((char *)timer_str,
				      ADF_CFG_BASE_DEC,
				      &timer_val))
			/* Convert msec to CPP clocks */
			timer_val = timer_val * (clk_per_sec / 1000);
	}
	/* Get Watch Dog Timer for CyAsym from the configuration */
	if (!adf_cfg_get_param_value(accel_dev,
				     ADF_GENERAL_SEC,
				     ADF_DEV_SSM_WDT_PKE,
				     (char *)timer_str)) {
		if (!compat_strtouint((char *)timer_str,
				      ADF_CFG_BASE_DEC,
				      &timer_val_pke))
			/* Convert msec to CPP clocks */
			timer_val_pke = timer_val_pke * (clk_per_sec / 1000);
	}
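	/*
	 * Example of the conversion above, with illustrative numbers only:
	 * a 200 ms timeout on a 600 MHz CPP clock becomes
	 * 200 * (600000000 / 1000) = 120,000,000 clock cycles.
	 */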

	for (i = 0, mask = hw_data->accel_mask; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		/* Enable Watch Dog Timer for CySym + Comp */
		ADF_CSR_WR(csr, ADF_SSMWDT(i), timer_val);
		/* Enable Watch Dog Timer for CyAsym */
		ADF_CSR_WR(csr, ADF_SSMWDTPKE(i), timer_val_pke);
	}
	return 0;
}

/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_init(struct adf_accel_dev *accel_dev)
{
	int ret = 0;

	mutex_lock(&accel_dev->lock);
	ret = adf_dev_init_locked(accel_dev);
	mutex_unlock(&accel_dev->lock);

	return ret;
}

static int
adf_dev_init_locked(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	int ret = 0;

	sysctl_ctx_init(&accel_dev->sysctl_ctx);
	set_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status);

	if (!hw_data) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to init device - hw_data not set\n");
		return EFAULT;
	}
	if (hw_data->reset_hw_units)
		hw_data->reset_hw_units(accel_dev);

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    !accel_dev->is_vf) {
		device_printf(GET_DEV(accel_dev), "Device not configured\n");
		return EFAULT;
	}

	if (adf_init_etr_data(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Failed to initialize etr\n");
		return EFAULT;
	}

	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize device\n");
		return EFAULT;
	}

	if (hw_data->init_accel_units && hw_data->init_accel_units(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize accel_units\n");
		return EFAULT;
	}

	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize admin comms\n");
		return EFAULT;
	}

	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize hw arbiter\n");
		return EFAULT;
	}

	if (hw_data->set_asym_rings_mask)
		hw_data->set_asym_rings_mask(accel_dev);

	hw_data->enable_ints(accel_dev);

	if (adf_ae_init(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialise Acceleration Engine\n");
		return EFAULT;
	}

	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

	if (adf_ae_fw_load(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to load acceleration FW\n");
		return EFAULT;
	}
	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

	if (hw_data->alloc_irq(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to allocate interrupts\n");
		return EFAULT;
	}
	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

	if (hw_data->init_ras && hw_data->init_ras(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Failed to init RAS\n");
		return EFAULT;
	}

	hw_data->enable_ints(accel_dev);

	hw_data->enable_error_correction(accel_dev);

	ret = hw_data->csr_info.pfvf_ops.enable_comms(accel_dev);
	if (ret)
		return ret;

	if (adf_cfg_add_device_params(accel_dev))
		return EFAULT;

	if (hw_data->add_pke_stats && hw_data->add_pke_stats(accel_dev))
		return EFAULT;

	if (hw_data->add_misc_error && hw_data->add_misc_error(accel_dev))
		return EFAULT;
	/*
	 * Subservice initialisation is divided into two stages: init and start.
	 * This is to facilitate any ordering dependencies between services
	 * prior to starting any of the accelerators.
	 */
	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to initialise service %s\n",
				      service->name);
			return EFAULT;
		}
		set_bit(accel_dev->accel_id, service->init_status);
	}

	/* Read autoreset on error parameter */
	ret = adf_cfg_get_param_value(accel_dev,
				      ADF_GENERAL_SEC,
				      ADF_AUTO_RESET_ON_ERROR,
				      value);
	if (!ret) {
		if (compat_strtouint(value,
				     10,
				     &accel_dev->autoreset_on_error)) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Failed converting %s to a decimal value\n",
			    ADF_AUTO_RESET_ON_ERROR);
			return EFAULT;
		}
	}

	return 0;
}

/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev:    Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_start(struct adf_accel_dev *accel_dev)
{
	int ret = 0;

	mutex_lock(&accel_dev->lock);
	ret = adf_dev_start_locked(accel_dev);
	mutex_unlock(&accel_dev->lock);

	return ret;
}

static int
adf_dev_start_locked(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

	set_bit(ADF_STATUS_STARTING, &accel_dev->status);
	if (adf_ae_start(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "AE Start Failed\n");
		return EFAULT;
	}

	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	if (hw_data->send_admin_init(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to send init message\n");
		return EFAULT;
	}

	if (adf_cfg_add_fw_version(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to update configuration FW version\n");
		return EFAULT;
	}

	if (hw_data->measure_clock)
		hw_data->measure_clock(accel_dev);

	/*
	 * Set ssm watch dog timer for slice hang detection
	 * Note! Not supported on devices older than C62x
	 */
	if (hw_data->set_ssm_wdtimer && hw_data->set_ssm_wdtimer(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Failed to set ssm watch dog timer\n");
		return EFAULT;
	}

	if (hw_data->int_timer_init && hw_data->int_timer_init(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to init heartbeat interrupt timer\n");
		return -EFAULT;
	}

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to start service %s\n",
				      service->name);
			return EFAULT;
		}
		set_bit(accel_dev->accel_id, service->start_status);
	}

	if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) {
		/* Register UIO devices */
		if (adf_uio_register(accel_dev)) {
			adf_uio_remove(accel_dev);
			device_printf(GET_DEV(accel_dev),
				      "Failed to register UIO devices\n");
			set_bit(ADF_STATUS_STARTING, &accel_dev->status);
			clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
			return ENODEV;
		}
	}

	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status) &&
	    adf_cfg_add_ext_params(accel_dev))
		return EFAULT;

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

	adf_dbgfs_add(accel_dev);

	return 0;
}

/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev:    Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	int ret = 0;

	mutex_lock(&accel_dev->lock);
	ret = adf_dev_stop_locked(accel_dev);
	mutex_unlock(&accel_dev->lock);

	return ret;
}

static int
adf_dev_stop_locked(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status))
		return 0;

	if (!adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
		return 0;
	}

	if (adf_dev_stop_notify_sync(accel_dev)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "Waiting for device un-busy failed. Retries limit reached\n");
		return EBUSY;
	}

	adf_dbgfs_rm(accel_dev);

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

	if (accel_dev->hw_device->int_timer_exit)
		accel_dev->hw_device->int_timer_exit(accel_dev);

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->start_status))
			continue;
		clear_bit(accel_dev->accel_id, service->start_status);
	}

	if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) {
		/* Remove UIO Devices */
		adf_uio_remove(accel_dev);
	}

	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
		if (adf_ae_stop(accel_dev))
			device_printf(GET_DEV(accel_dev),
				      "failed to stop AE\n");
		else
			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	}

	return 0;
}

/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
void
adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
	mutex_lock(&accel_dev->lock);
	adf_dev_shutdown_locked(accel_dev);
	mutex_unlock(&accel_dev->lock);
}

static void
adf_dev_shutdown_locked(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status))
		return;

	if (test_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status)) {
		sysctl_ctx_free(&accel_dev->sysctl_ctx);
		clear_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED,
			  &accel_dev->status);
	}

	if (!hw_data) {
		device_printf(
		    GET_DEV(accel_dev),
		    "QAT: Failed to shutdown device - hw_data not set\n");
		return;
	}

	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
		adf_ae_fw_release(accel_dev);
		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
		if (adf_ae_shutdown(accel_dev))
			device_printf(GET_DEV(accel_dev),
				      "Failed to shutdown Accel Engine\n");
		else
			clear_bit(ADF_STATUS_AE_INITIALISED,
				  &accel_dev->status);
	}

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			device_printf(GET_DEV(accel_dev),
				      "Failed to shutdown service %s\n",
				      service->name);
		else
			clear_bit(accel_dev->accel_id, service->init_status);
	}

	hw_data->disable_iov(accel_dev);

	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
		hw_data->free_irq(accel_dev);
		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
	}

	/* Delete configuration only if not restarting */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
		adf_cfg_del_all(accel_dev);
#ifdef QAT_UIO
		adf_cfg_device_clear_all(accel_dev);
#endif
	}

	if (hw_data->remove_pke_stats)
		hw_data->remove_pke_stats(accel_dev);

	if (hw_data->remove_misc_error)
		hw_data->remove_misc_error(accel_dev);

	if (hw_data->exit_ras)
		hw_data->exit_ras(accel_dev);

	if (hw_data->exit_arb)
		hw_data->exit_arb(accel_dev);

	if (hw_data->exit_admin_comms)
		hw_data->exit_admin_comms(accel_dev);

	if (hw_data->exit_accel_units)
		hw_data->exit_accel_units(accel_dev);

	adf_cleanup_etr_data(accel_dev);
	if (hw_data->restore_device)
		hw_data->restore_device(accel_dev);
}
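
/*
 * Illustrative, compiled-out sketch of the lifecycle pairing described in
 * the kernel-doc comments above: a device-specific driver brings a device
 * up with adf_dev_init() followed by adf_dev_start(), and reverses that
 * with adf_dev_stop() followed by adf_dev_shutdown().  The error handling
 * below is a simplified assumption, not the sequence of any particular
 * attach routine.
 */
#if 0
static int
example_bring_up_and_down(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_dev_init(accel_dev);
	if (!ret)
		ret = adf_dev_start(accel_dev);
	if (ret) {
		/* Unwind whatever was set up before the failure. */
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
		return ret;
	}

	/* ... device in service ... */

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);
	return 0;
}
#endif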

/**
 * adf_dev_reset() - Reset acceleration service for the given accel device
 * @accel_dev:    Pointer to acceleration device.
 * @mode: Specifies reset mode - synchronous or asynchronous.
 *
 * Function notifies all the registered services that the acceleration device
 * is resetting.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode)
{
	return adf_dev_aer_schedule_reset(accel_dev, mode);
}

int
adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
			device_printf(GET_DEV(accel_dev),
				      "Failed to restart service %s.\n",
				      service->name);
	}
	return 0;
}

int
adf_dev_restarting_notify_sync(struct adf_accel_dev *accel_dev)
{
	int times;

	adf_dev_restarting_notify(accel_dev);
	for (times = 0; times < ADF_STOP_RETRY; times++) {
		if (!adf_dev_in_use(accel_dev))
			break;
		dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times);
		pause_ms("adfstop", 100);
	}
	if (adf_dev_in_use(accel_dev)) {
		clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
		device_printf(GET_DEV(accel_dev),
			      "Device still in use during reset sequence.\n");
		return EBUSY;
	}

	return 0;
}

int
adf_dev_stop_notify_sync(struct adf_accel_dev *accel_dev)
{
	int times;
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_STOP))
			device_printf(GET_DEV(accel_dev),
				      "Failed to send stop event to service %s.\n",
				      service->name);
	}

	for (times = 0; times < ADF_STOP_RETRY; times++) {
		if (!adf_dev_in_use(accel_dev))
			break;
		dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times);
		pause_ms("adfstop", 100);
	}
	if (adf_dev_in_use(accel_dev)) {
		clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
		device_printf(GET_DEV(accel_dev),
			      "Device still in use during stop sequence.\n");
		return EBUSY;
	}

	return 0;
}

int
adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
			device_printf(GET_DEV(accel_dev),
				      "Failed to restart service %s.\n",
				      service->name);
	}
	return 0;
}
807