xref: /linux/drivers/crypto/intel/qat/qat_common/adf_init.c (revision 6e7fd890f1d6ac83805409e9c346240de2705584)
1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/mutex.h>
4 #include <linux/list.h>
5 #include <linux/bitops.h>
6 #include <linux/delay.h>
7 #include "adf_accel_devices.h"
8 #include "adf_cfg.h"
9 #include "adf_common_drv.h"
10 #include "adf_dbgfs.h"
11 #include "adf_heartbeat.h"
12 #include "adf_rl.h"
13 #include "adf_sysfs_ras_counters.h"
14 #include "adf_telemetry.h"
15 
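/*
 * Registry of subservice handlers (crypto, compression, etc.). Entries are
 * added and removed under service_lock; the event loops below walk this list
 * to deliver ADF_EVENT_* notifications to every registered service.
 */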
16 static LIST_HEAD(service_table);
17 static DEFINE_MUTEX(service_lock);
18 
19 static void adf_service_add(struct service_hndl *service)
20 {
21 	mutex_lock(&service_lock);
22 	list_add(&service->list, &service_table);
23 	mutex_unlock(&service_lock);
24 }
25 
26 int adf_service_register(struct service_hndl *service)
27 {
28 	memset(service->init_status, 0, sizeof(service->init_status));
29 	memset(service->start_status, 0, sizeof(service->start_status));
30 	adf_service_add(service);
31 	return 0;
32 }
33 
34 static void adf_service_remove(struct service_hndl *service)
35 {
36 	mutex_lock(&service_lock);
37 	list_del(&service->list);
38 	mutex_unlock(&service_lock);
39 }
40 
41 int adf_service_unregister(struct service_hndl *service)
42 {
43 	int i;
44 
45 	for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
46 		if (service->init_status[i] || service->start_status[i]) {
47 			pr_err("QAT: Could not remove active service\n");
48 			return -EFAULT;
49 		}
50 	}
51 	adf_service_remove(service);
52 	return 0;
53 }
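/*
 * Illustrative sketch (hypothetical, not a handler defined in this file):
 * a subservice registers a single handler and is then driven entirely by
 * the ADF_EVENT_* notifications issued by the functions below. A handler
 * may return -EAGAIN from ADF_EVENT_STOP to ask adf_dev_stop() for a short
 * grace period before the acceleration engines are halted.
 *
 *	static int my_event_handler(struct adf_accel_dev *accel_dev,
 *				    enum adf_event event)
 *	{
 *		return 0;
 *	}
 *
 *	static struct service_hndl my_service = {
 *		.name = "my_service",
 *		.event_hld = my_event_handler,
 *	};
 *
 *	adf_service_register(&my_service);
 */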
54 
55 /**
56  * adf_dev_init() - Init data structures and services for the given accel device
57  * @accel_dev: Pointer to acceleration device.
58  *
59  * Initialize the ring data structures and the admin comms and arbitration
60  * services.
61  *
62  * Return: 0 on success, error code otherwise.
63  */
64 static int adf_dev_init(struct adf_accel_dev *accel_dev)
65 {
66 	struct service_hndl *service;
67 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
68 	int ret;
69 
70 	if (!hw_data) {
71 		dev_err(&GET_DEV(accel_dev),
72 			"Failed to init device - hw_data not set\n");
73 		return -EFAULT;
74 	}
75 
76 	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
77 	    !accel_dev->is_vf) {
78 		dev_err(&GET_DEV(accel_dev), "Device not configured\n");
79 		return -EFAULT;
80 	}
81 
82 	if (adf_init_etr_data(accel_dev)) {
83 		dev_err(&GET_DEV(accel_dev), "Failed to initialize etr\n");
84 		return -EFAULT;
85 	}
86 
87 	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
88 		dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
89 		return -EFAULT;
90 	}
91 
92 	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
93 		dev_err(&GET_DEV(accel_dev), "Failed to initialize admin comms\n");
94 		return -EFAULT;
95 	}
96 
97 	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
98 		dev_err(&GET_DEV(accel_dev), "Failed to initialize hw arbiter\n");
99 		return -EFAULT;
100 	}
101 
102 	if (hw_data->get_ring_to_svc_map)
103 		hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev);
104 
105 	if (adf_ae_init(accel_dev)) {
106 		dev_err(&GET_DEV(accel_dev),
107 			"Failed to initialise Acceleration Engine\n");
108 		return -EFAULT;
109 	}
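	/*
	 * Progress is recorded in ADF_STATUS_* bits as each step completes,
	 * so adf_dev_shutdown() can unwind only what was actually brought up.
	 */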
110 	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
111 
112 	if (adf_ae_fw_load(accel_dev)) {
113 		dev_err(&GET_DEV(accel_dev),
114 			"Failed to load acceleration FW\n");
115 		return -EFAULT;
116 	}
117 	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
118 
119 	if (hw_data->alloc_irq(accel_dev)) {
120 		dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
121 		return -EFAULT;
122 	}
123 	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
124 
125 	if (hw_data->ras_ops.enable_ras_errors)
126 		hw_data->ras_ops.enable_ras_errors(accel_dev);
127 
128 	hw_data->enable_ints(accel_dev);
129 	hw_data->enable_error_correction(accel_dev);
130 
131 	ret = hw_data->pfvf_ops.enable_comms(accel_dev);
132 	if (ret)
133 		return ret;
134 
135 	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
136 	    accel_dev->is_vf) {
137 		if (qat_crypto_vf_dev_config(accel_dev))
138 			return -EFAULT;
139 	}
140 
141 	adf_heartbeat_init(accel_dev);
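	/*
	 * Rate limiting and telemetry are optional; -EOPNOTSUPP means the
	 * feature is not supported on this device and is not a fatal error.
	 */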
142 	ret = adf_rl_init(accel_dev);
143 	if (ret && ret != -EOPNOTSUPP)
144 		return ret;
145 
146 	ret = adf_tl_init(accel_dev);
147 	if (ret && ret != -EOPNOTSUPP)
148 		return ret;
149 
150 	/*
151 	 * Subservice initialisation is divided into two stages: init and start.
152 	 * This allows ordering dependencies between services to be honoured
153 	 * before any of the accelerators are started.
154 	 */
155 	list_for_each_entry(service, &service_table, list) {
156 		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
157 			dev_err(&GET_DEV(accel_dev),
158 				"Failed to initialise service %s\n",
159 				service->name);
160 			return -EFAULT;
161 		}
162 		set_bit(accel_dev->accel_id, service->init_status);
163 	}
164 
165 	return 0;
166 }
167 
168 /**
169  * adf_dev_start() - Start acceleration service for the given accel device
170  * @accel_dev:    Pointer to acceleration device.
171  *
172  * Function notifies all the registered services that the acceleration device
173  * is ready to be used.
174  * To be used by QAT device specific drivers.
175  *
176  * Return: 0 on success, error code otherwise.
177  */
178 static int adf_dev_start(struct adf_accel_dev *accel_dev)
179 {
180 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
181 	struct service_hndl *service;
182 	int ret;
183 
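	/*
	 * Mark the device as starting. The bit remains set if start fails
	 * part way, so a subsequent adf_dev_stop() still runs and tears down
	 * whatever did manage to start.
	 */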
184 	set_bit(ADF_STATUS_STARTING, &accel_dev->status);
185 
186 	if (adf_ae_start(accel_dev)) {
187 		dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
188 		return -EFAULT;
189 	}
190 	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
191 
192 	if (hw_data->send_admin_init(accel_dev)) {
193 		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
194 		return -EFAULT;
195 	}
196 
197 	if (hw_data->measure_clock) {
198 		ret = hw_data->measure_clock(accel_dev);
199 		if (ret) {
200 			dev_err(&GET_DEV(accel_dev), "Failed to measure device clock\n");
201 			return ret;
202 		}
203 	}
204 
205 	/* Set SSM watchdog timer */
206 	if (hw_data->set_ssm_wdtimer)
207 		hw_data->set_ssm_wdtimer(accel_dev);
208 
209 	/* Enable Power Management */
210 	if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
211 		dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
212 		return -EFAULT;
213 	}
214 
215 	if (hw_data->start_timer) {
216 		ret = hw_data->start_timer(accel_dev);
217 		if (ret) {
218 			dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
219 			return ret;
220 		}
221 	}
222 
223 	adf_heartbeat_start(accel_dev);
224 	ret = adf_rl_start(accel_dev);
225 	if (ret && ret != -EOPNOTSUPP)
226 		return ret;
227 
228 	ret = adf_tl_start(accel_dev);
229 	if (ret && ret != -EOPNOTSUPP)
230 		return ret;
231 
232 	list_for_each_entry(service, &service_table, list) {
233 		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
234 			dev_err(&GET_DEV(accel_dev),
235 				"Failed to start service %s\n",
236 				service->name);
237 			return -EFAULT;
238 		}
239 		set_bit(accel_dev->accel_id, service->start_status);
240 	}
241 
242 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
243 	set_bit(ADF_STATUS_STARTED, &accel_dev->status);
244 
245 	if (!list_empty(&accel_dev->crypto_list) &&
246 	    (qat_algs_register() || qat_asym_algs_register())) {
247 		dev_err(&GET_DEV(accel_dev),
248 			"Failed to register crypto algs\n");
249 		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
250 		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
251 		return -EFAULT;
252 	}
253 	set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
254 
255 	if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
256 		dev_err(&GET_DEV(accel_dev),
257 			"Failed to register compression algs\n");
258 		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
259 		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
260 		return -EFAULT;
261 	}
262 	set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
263 
264 	adf_dbgfs_add(accel_dev);
265 	adf_sysfs_start_ras(accel_dev);
266 
267 	return 0;
268 }
269 
270 /**
271  * adf_dev_stop() - Stop acceleration service for the given accel device
272  * @accel_dev:    Pointer to acceleration device.
273  *
274  * Function notifies all the registered services that the acceleration device
275  * is shutting down.
276  * To be used by QAT device specific drivers.
277  *
278  * Return: void
279  */
280 static void adf_dev_stop(struct adf_accel_dev *accel_dev)
281 {
282 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
283 	struct service_hndl *service;
284 	bool wait = false;
285 	int ret;
286 
287 	if (!adf_dev_started(accel_dev) &&
288 	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
289 		return;
290 
291 	adf_tl_stop(accel_dev);
292 	adf_rl_stop(accel_dev);
293 	adf_dbgfs_rm(accel_dev);
294 	adf_sysfs_stop_ras(accel_dev);
295 
296 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
297 	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
298 
299 	if (!list_empty(&accel_dev->crypto_list) &&
300 	    test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
301 		qat_algs_unregister();
302 		qat_asym_algs_unregister();
303 	}
304 	clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
305 
306 	if (!list_empty(&accel_dev->compression_list) &&
307 	    test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
308 		qat_comp_algs_unregister();
309 	clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
310 
311 	list_for_each_entry(service, &service_table, list) {
312 		if (!test_bit(accel_dev->accel_id, service->start_status))
313 			continue;
314 		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
315 		if (!ret) {
316 			clear_bit(accel_dev->accel_id, service->start_status);
317 		} else if (ret == -EAGAIN) {
318 			wait = true;
319 			clear_bit(accel_dev->accel_id, service->start_status);
320 		}
321 	}
322 
323 	if (hw_data->stop_timer)
324 		hw_data->stop_timer(accel_dev);
325 
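	/* Give services that returned -EAGAIN above time to quiesce. */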
326 	if (wait)
327 		msleep(100);
328 
329 	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
330 		if (adf_ae_stop(accel_dev))
331 			dev_err(&GET_DEV(accel_dev), "Failed to stop AE\n");
332 		else
333 			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
334 	}
335 }
336 
337 /**
338  * adf_dev_shutdown() - shut down acceleration services and data structures
339  * @accel_dev: Pointer to acceleration device
340  *
341  * Cleanup the ring data structures and the admin comms and arbitration
342  * services.
343  */
344 static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
345 {
346 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
347 	struct service_hndl *service;
348 
349 	if (!hw_data) {
350 		dev_err(&GET_DEV(accel_dev),
351 			"QAT: Failed to shut down device - hw_data not set\n");
352 		return;
353 	}
354 
355 	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
356 		adf_ae_fw_release(accel_dev);
357 		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
358 	}
359 
360 	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
361 		if (adf_ae_shutdown(accel_dev))
362 			dev_err(&GET_DEV(accel_dev),
363 				"Failed to shut down Accel Engine\n");
364 		else
365 			clear_bit(ADF_STATUS_AE_INITIALISED,
366 				  &accel_dev->status);
367 	}
368 
369 	list_for_each_entry(service, &service_table, list) {
370 		if (!test_bit(accel_dev->accel_id, service->init_status))
371 			continue;
372 		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
373 			dev_err(&GET_DEV(accel_dev),
374 				"Failed to shutdown service %s\n",
375 				service->name);
376 		else
377 			clear_bit(accel_dev->accel_id, service->init_status);
378 	}
379 
380 	adf_rl_exit(accel_dev);
381 
382 	if (hw_data->ras_ops.disable_ras_errors)
383 		hw_data->ras_ops.disable_ras_errors(accel_dev);
384 
385 	adf_heartbeat_shutdown(accel_dev);
386 
387 	adf_tl_shutdown(accel_dev);
388 
389 	hw_data->disable_iov(accel_dev);
390 
391 	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
392 		hw_data->free_irq(accel_dev);
393 		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
394 	}
395 
396 	/* Delete configuration only if not restarting */
397 	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
398 		adf_cfg_del_all(accel_dev);
399 
400 	if (hw_data->exit_arb)
401 		hw_data->exit_arb(accel_dev);
402 
403 	if (hw_data->exit_admin_comms)
404 		hw_data->exit_admin_comms(accel_dev);
405 
406 	adf_cleanup_etr_data(accel_dev);
407 	adf_dev_restore(accel_dev);
408 }
409 
410 int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
411 {
412 	struct service_hndl *service;
413 
414 	list_for_each_entry(service, &service_table, list) {
415 		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
416 			dev_err(&GET_DEV(accel_dev),
417 				"Failed to restart service %s.\n",
418 				service->name);
419 	}
420 	return 0;
421 }
422 
423 int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
424 {
425 	struct service_hndl *service;
426 
427 	list_for_each_entry(service, &service_table, list) {
428 		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
429 			dev_err(&GET_DEV(accel_dev),
430 				"Failed to restart service %s.\n",
431 				service->name);
432 	}
433 	return 0;
434 }
435 
436 void adf_error_notifier(struct adf_accel_dev *accel_dev)
437 {
438 	struct service_hndl *service;
439 
440 	list_for_each_entry(service, &service_table, list) {
441 		if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR))
442 			dev_err(&GET_DEV(accel_dev),
443 				"Failed to send error event to %s.\n",
444 				service->name);
445 	}
446 }
447 
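/*
 * Stop and shut down the device while preserving the ADF_SERVICES_ENABLED
 * configuration entry, which the shutdown would otherwise delete along with
 * the rest of the device configuration, so that a later reconfiguration
 * starts from the same service selection.
 */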
448 static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
449 {
450 	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
451 	int ret;
452 
453 	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
454 				      ADF_SERVICES_ENABLED, services);
455 
456 	adf_dev_stop(accel_dev);
457 	adf_dev_shutdown(accel_dev);
458 
459 	if (!ret) {
460 		ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
461 		if (ret)
462 			return ret;
463 
464 		ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
465 						  ADF_SERVICES_ENABLED,
466 						  services, ADF_STR);
467 		if (ret)
468 			return ret;
469 	}
470 
471 	return 0;
472 }
473 
474 int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
475 {
476 	int ret = 0;
477 
478 	if (!accel_dev)
479 		return -EINVAL;
480 
481 	mutex_lock(&accel_dev->state_lock);
482 
483 	if (reconfig) {
484 		ret = adf_dev_shutdown_cache_cfg(accel_dev);
485 		goto out;
486 	}
487 
488 	adf_dev_stop(accel_dev);
489 	adf_dev_shutdown(accel_dev);
490 
491 out:
492 	mutex_unlock(&accel_dev->state_lock);
493 	return ret;
494 }
495 EXPORT_SYMBOL_GPL(adf_dev_down);
496 
497 int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
498 {
499 	int ret = 0;
500 
501 	if (!accel_dev)
502 		return -EINVAL;
503 
504 	mutex_lock(&accel_dev->state_lock);
505 
506 	if (adf_dev_started(accel_dev)) {
507 		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
508 			 accel_dev->accel_id);
509 		ret = -EALREADY;
510 		goto out;
511 	}
512 
513 	if (config && GET_HW_DATA(accel_dev)->dev_config) {
514 		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
515 		if (unlikely(ret))
516 			goto out;
517 	}
518 
519 	ret = adf_dev_init(accel_dev);
520 	if (unlikely(ret))
521 		goto out;
522 
523 	ret = adf_dev_start(accel_dev);
524 
525 out:
526 	mutex_unlock(&accel_dev->state_lock);
527 	return ret;
528 }
529 EXPORT_SYMBOL_GPL(adf_dev_up);
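/*
 * Illustrative usage (callers live outside this file and may differ): a
 * device-specific driver typically configures the device, brings it up, and
 * unwinds with adf_dev_down() if bring-up fails or when the device goes away.
 *
 *	ret = adf_dev_up(accel_dev, true);
 *	if (ret && ret != -EALREADY)
 *		adf_dev_down(accel_dev, false);
 */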
530 
531 int adf_dev_restart(struct adf_accel_dev *accel_dev)
532 {
533 	int ret = 0;
534 
535 	if (!accel_dev)
536 		return -EFAULT;
537 
538 	adf_dev_down(accel_dev, false);
539 
540 	ret = adf_dev_up(accel_dev, false);
541 	/* If the device is already up, return success */
542 	if (ret == -EALREADY)
543 		return 0;
544 
545 	return ret;
546 }
547 EXPORT_SYMBOL_GPL(adf_dev_restart);
548