xref: /linux/drivers/crypto/intel/qat/qat_common/adf_init.c (revision ab475966455ce285c2c9978a3e3bfe97d75ff8d4)
1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/mutex.h>
4 #include <linux/list.h>
5 #include <linux/bitops.h>
6 #include <linux/delay.h>
7 #include "adf_accel_devices.h"
8 #include "adf_cfg.h"
9 #include "adf_common_drv.h"
10 #include "adf_dbgfs.h"
11 #include "adf_heartbeat.h"
12 #include "adf_rl.h"
13 #include "adf_sysfs_ras_counters.h"
14 
15 static LIST_HEAD(service_table);
16 static DEFINE_MUTEX(service_lock);
17 
18 static void adf_service_add(struct service_hndl *service)
19 {
20 	mutex_lock(&service_lock);
21 	list_add(&service->list, &service_table);
22 	mutex_unlock(&service_lock);
23 }
24 
25 int adf_service_register(struct service_hndl *service)
26 {
27 	memset(service->init_status, 0, sizeof(service->init_status));
28 	memset(service->start_status, 0, sizeof(service->start_status));
29 	adf_service_add(service);
30 	return 0;
31 }
32 
33 static void adf_service_remove(struct service_hndl *service)
34 {
35 	mutex_lock(&service_lock);
36 	list_del(&service->list);
37 	mutex_unlock(&service_lock);
38 }
39 
40 int adf_service_unregister(struct service_hndl *service)
41 {
42 	int i;
43 
44 	for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
45 		if (service->init_status[i] || service->start_status[i]) {
46 			pr_err("QAT: Could not remove active service\n");
47 			return -EFAULT;
48 		}
49 	}
50 	adf_service_remove(service);
51 	return 0;
52 }
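
/*
 * Illustrative sketch only (not part of the original file): a subsystem that
 * wants the lifecycle notifications dispatched below registers a
 * struct service_hndl whose event handler is called with the ADF_EVENT_*
 * values used in this file.  The handler and service names here are
 * hypothetical; the exact struct and event definitions live in the common
 * QAT headers included above.
 *
 *	static int example_event_handler(struct adf_accel_dev *accel_dev,
 *					 enum adf_event event)
 *	{
 *		switch (event) {
 *		case ADF_EVENT_INIT:
 *		case ADF_EVENT_START:
 *		case ADF_EVENT_STOP:
 *		case ADF_EVENT_SHUTDOWN:
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	static struct service_hndl example_service = {
 *		.event_hld = example_event_handler,
 *		.name = "example_service",
 *	};
 *
 *	adf_service_register(&example_service);
 *	...
 *	adf_service_unregister(&example_service);
 */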
53 
54 /**
55  * adf_dev_init() - Init data structures and services for the given accel device
56  * @accel_dev: Pointer to acceleration device.
57  *
58  * Initialize the ring data structures and the admin comms and arbitration
59  * services.
60  *
61  * Return: 0 on success, error code otherwise.
62  */
63 static int adf_dev_init(struct adf_accel_dev *accel_dev)
64 {
65 	struct service_hndl *service;
66 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
67 	int ret;
68 
69 	if (!hw_data) {
70 		dev_err(&GET_DEV(accel_dev),
71 			"Failed to init device - hw_data not set\n");
72 		return -EFAULT;
73 	}
74 
75 	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
76 	    !accel_dev->is_vf) {
77 		dev_err(&GET_DEV(accel_dev), "Device not configured\n");
78 		return -EFAULT;
79 	}
80 
81 	if (adf_init_etr_data(accel_dev)) {
82 		dev_err(&GET_DEV(accel_dev), "Failed to initialize etr\n");
83 		return -EFAULT;
84 	}
85 
86 	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
87 		dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
88 		return -EFAULT;
89 	}
90 
91 	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
92 		dev_err(&GET_DEV(accel_dev), "Failed to initialize admin comms\n");
93 		return -EFAULT;
94 	}
95 
96 	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
97 		dev_err(&GET_DEV(accel_dev), "Failed to initialize hw arbiter\n");
98 		return -EFAULT;
99 	}
100 
101 	if (hw_data->get_ring_to_svc_map)
102 		hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev);
103 
104 	if (adf_ae_init(accel_dev)) {
105 		dev_err(&GET_DEV(accel_dev),
106 			"Failed to initialise Acceleration Engine\n");
107 		return -EFAULT;
108 	}
109 	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
110 
111 	if (adf_ae_fw_load(accel_dev)) {
112 		dev_err(&GET_DEV(accel_dev),
113 			"Failed to load acceleration FW\n");
114 		return -EFAULT;
115 	}
116 	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
117 
118 	if (hw_data->alloc_irq(accel_dev)) {
119 		dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
120 		return -EFAULT;
121 	}
122 	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
123 
124 	if (hw_data->ras_ops.enable_ras_errors)
125 		hw_data->ras_ops.enable_ras_errors(accel_dev);
126 
127 	hw_data->enable_ints(accel_dev);
128 	hw_data->enable_error_correction(accel_dev);
129 
130 	ret = hw_data->pfvf_ops.enable_comms(accel_dev);
131 	if (ret)
132 		return ret;
133 
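	/*
	 * A VF that has not been configured yet picks up its device
	 * configuration here, once PF/VF comms are available.
	 */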
134 	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
135 	    accel_dev->is_vf) {
136 		if (qat_crypto_vf_dev_config(accel_dev))
137 			return -EFAULT;
138 	}
139 
140 	adf_heartbeat_init(accel_dev);
141 	ret = adf_rl_init(accel_dev);
142 	if (ret && ret != -EOPNOTSUPP)
143 		return ret;
144 
145 	/*
146 	 * Subservice initialisation is divided into two stages: init and start.
147 	 * This is to facilitate any ordering dependencies between services
148 	 * prior to starting any of the accelerators.
149 	 */
150 	list_for_each_entry(service, &service_table, list) {
151 		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
152 			dev_err(&GET_DEV(accel_dev),
153 				"Failed to initialise service %s\n",
154 				service->name);
155 			return -EFAULT;
156 		}
157 		set_bit(accel_dev->accel_id, service->init_status);
158 	}
159 
160 	return 0;
161 }
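
/*
 * Note: adf_dev_init() is undone by adf_dev_shutdown() and adf_dev_start()
 * by adf_dev_stop(); both sequences are driven through adf_dev_up() and
 * adf_dev_down() further down in this file.
 */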
162 
163 /**
164  * adf_dev_start() - Start acceleration service for the given accel device
165  * @accel_dev:    Pointer to acceleration device.
166  *
167  * Function notifies all the registered services that the acceleration device
168  * is ready to be used.
169  * To be used by QAT device specific drivers.
170  *
171  * Return: 0 on success, error code otherwise.
172  */
173 static int adf_dev_start(struct adf_accel_dev *accel_dev)
174 {
175 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
176 	struct service_hndl *service;
177 	int ret;
178 
179 	set_bit(ADF_STATUS_STARTING, &accel_dev->status);
180 
181 	if (adf_ae_start(accel_dev)) {
182 		dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
183 		return -EFAULT;
184 	}
185 	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
186 
187 	if (hw_data->send_admin_init(accel_dev)) {
188 		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
189 		return -EFAULT;
190 	}
191 
192 	if (hw_data->measure_clock) {
193 		ret = hw_data->measure_clock(accel_dev);
194 		if (ret) {
195 			dev_err(&GET_DEV(accel_dev), "Failed to measure device clock\n");
196 			return ret;
197 		}
198 	}
199 
200 	/* Set ssm watch dog timer */
201 	if (hw_data->set_ssm_wdtimer)
202 		hw_data->set_ssm_wdtimer(accel_dev);
203 
204 	/* Enable Power Management */
205 	if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
206 		dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
207 		return -EFAULT;
208 	}
209 
210 	if (hw_data->start_timer) {
211 		ret = hw_data->start_timer(accel_dev);
212 		if (ret) {
213 			dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
214 			return ret;
215 		}
216 	}
217 
218 	adf_heartbeat_start(accel_dev);
219 	ret = adf_rl_start(accel_dev);
220 	if (ret && ret != -EOPNOTSUPP)
221 		return ret;
222 
223 	list_for_each_entry(service, &service_table, list) {
224 		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
225 			dev_err(&GET_DEV(accel_dev),
226 				"Failed to start service %s\n",
227 				service->name);
228 			return -EFAULT;
229 		}
230 		set_bit(accel_dev->accel_id, service->start_status);
231 	}
232 
233 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
234 	set_bit(ADF_STATUS_STARTED, &accel_dev->status);
235 
236 	if (!list_empty(&accel_dev->crypto_list) &&
237 	    (qat_algs_register() || qat_asym_algs_register())) {
238 		dev_err(&GET_DEV(accel_dev),
239 			"Failed to register crypto algs\n");
240 		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
241 		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
242 		return -EFAULT;
243 	}
244 	set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
245 
246 	if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
247 		dev_err(&GET_DEV(accel_dev),
248 			"Failed to register compression algs\n");
249 		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
250 		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
251 		return -EFAULT;
252 	}
253 	set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
254 
255 	adf_dbgfs_add(accel_dev);
256 	adf_sysfs_start_ras(accel_dev);
257 
258 	return 0;
259 }
260 
261 /**
262  * adf_dev_stop() - Stop acceleration service for the given accel device
263  * @accel_dev:    Pointer to acceleration device.
264  *
265  * Function notifies all the registered services that the acceleration device
266  * is shutting down.
267  * To be used by QAT device specific drivers.
268  *
269  * Return: void
270  */
271 static void adf_dev_stop(struct adf_accel_dev *accel_dev)
272 {
273 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
274 	struct service_hndl *service;
275 	bool wait = false;
276 	int ret;
277 
278 	if (!adf_dev_started(accel_dev) &&
279 	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
280 		return;
281 
282 	adf_rl_stop(accel_dev);
283 	adf_dbgfs_rm(accel_dev);
284 	adf_sysfs_stop_ras(accel_dev);
285 
286 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
287 	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
288 
289 	if (!list_empty(&accel_dev->crypto_list) &&
290 	    test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
291 		qat_algs_unregister();
292 		qat_asym_algs_unregister();
293 	}
294 	clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
295 
296 	if (!list_empty(&accel_dev->compression_list) &&
297 	    test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
298 		qat_comp_algs_unregister();
299 	clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
300 
301 	list_for_each_entry(service, &service_table, list) {
302 		if (!test_bit(accel_dev->accel_id, service->start_status))
303 			continue;
304 		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
305 		if (!ret) {
306 			clear_bit(accel_dev->accel_id, service->start_status);
307 		} else if (ret == -EAGAIN) {
308 			wait = true;
309 			clear_bit(accel_dev->accel_id, service->start_status);
310 		}
311 	}
312 
313 	if (hw_data->stop_timer)
314 		hw_data->stop_timer(accel_dev);
315 
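	/*
	 * Services that could not stop immediately returned -EAGAIN above;
	 * give them a short grace period before the AEs are halted.
	 */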
316 	if (wait)
317 		msleep(100);
318 
319 	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
320 		if (adf_ae_stop(accel_dev))
321 			dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
322 		else
323 			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
324 	}
325 }
326 
327 /**
328  * adf_dev_shutdown() - shut down acceleration services and data structures
329  * @accel_dev: Pointer to acceleration device
330  *
331  * Cleanup the ring data structures and the admin comms and arbitration
332  * services.
333  */
334 static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
335 {
336 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
337 	struct service_hndl *service;
338 
339 	if (!hw_data) {
340 		dev_err(&GET_DEV(accel_dev),
341 			"QAT: Failed to shut down device - hw_data not set\n");
342 		return;
343 	}
344 
345 	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
346 		adf_ae_fw_release(accel_dev);
347 		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
348 	}
349 
350 	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
351 		if (adf_ae_shutdown(accel_dev))
352 			dev_err(&GET_DEV(accel_dev),
353 				"Failed to shut down Accel Engine\n");
354 		else
355 			clear_bit(ADF_STATUS_AE_INITIALISED,
356 				  &accel_dev->status);
357 	}
358 
359 	list_for_each_entry(service, &service_table, list) {
360 		if (!test_bit(accel_dev->accel_id, service->init_status))
361 			continue;
362 		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
363 			dev_err(&GET_DEV(accel_dev),
364 				"Failed to shutdown service %s\n",
365 				service->name);
366 		else
367 			clear_bit(accel_dev->accel_id, service->init_status);
368 	}
369 
370 	adf_rl_exit(accel_dev);
371 
372 	if (hw_data->ras_ops.disable_ras_errors)
373 		hw_data->ras_ops.disable_ras_errors(accel_dev);
374 
375 	adf_heartbeat_shutdown(accel_dev);
376 
377 	hw_data->disable_iov(accel_dev);
378 
379 	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
380 		hw_data->free_irq(accel_dev);
381 		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
382 	}
383 
384 	/* Delete configuration only if not restarting */
385 	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
386 		adf_cfg_del_all(accel_dev);
387 
388 	if (hw_data->exit_arb)
389 		hw_data->exit_arb(accel_dev);
390 
391 	if (hw_data->exit_admin_comms)
392 		hw_data->exit_admin_comms(accel_dev);
393 
394 	adf_cleanup_etr_data(accel_dev);
395 	adf_dev_restore(accel_dev);
396 }
397 
398 int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
399 {
400 	struct service_hndl *service;
401 
402 	list_for_each_entry(service, &service_table, list) {
403 		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
404 			dev_err(&GET_DEV(accel_dev),
405 				"Failed to restart service %s.\n",
406 				service->name);
407 	}
408 	return 0;
409 }
410 
411 int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
412 {
413 	struct service_hndl *service;
414 
415 	list_for_each_entry(service, &service_table, list) {
416 		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
417 			dev_err(&GET_DEV(accel_dev),
418 				"Failed to restart service %s.\n",
419 				service->name);
420 	}
421 	return 0;
422 }
423 
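/*
 * Stop and shut down the device while preserving the ADF_SERVICES_ENABLED
 * configuration value, which is otherwise lost when the device configuration
 * is deleted during shutdown, so that a subsequent reconfiguration can
 * reuse it.
 */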
424 static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
425 {
426 	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
427 	int ret;
428 
429 	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
430 				      ADF_SERVICES_ENABLED, services);
431 
432 	adf_dev_stop(accel_dev);
433 	adf_dev_shutdown(accel_dev);
434 
435 	if (!ret) {
436 		ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
437 		if (ret)
438 			return ret;
439 
440 		ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
441 						  ADF_SERVICES_ENABLED,
442 						  services, ADF_STR);
443 		if (ret)
444 			return ret;
445 	}
446 
447 	return 0;
448 }
449 
450 int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
451 {
452 	int ret = 0;
453 
454 	if (!accel_dev)
455 		return -EINVAL;
456 
457 	mutex_lock(&accel_dev->state_lock);
458 
459 	if (reconfig) {
460 		ret = adf_dev_shutdown_cache_cfg(accel_dev);
461 		goto out;
462 	}
463 
464 	adf_dev_stop(accel_dev);
465 	adf_dev_shutdown(accel_dev);
466 
467 out:
468 	mutex_unlock(&accel_dev->state_lock);
469 	return ret;
470 }
471 EXPORT_SYMBOL_GPL(adf_dev_down);
472 
473 int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
474 {
475 	int ret = 0;
476 
477 	if (!accel_dev)
478 		return -EINVAL;
479 
480 	mutex_lock(&accel_dev->state_lock);
481 
482 	if (adf_dev_started(accel_dev)) {
483 		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
484 			 accel_dev->accel_id);
485 		ret = -EALREADY;
486 		goto out;
487 	}
488 
489 	if (config && GET_HW_DATA(accel_dev)->dev_config) {
490 		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
491 		if (unlikely(ret))
492 			goto out;
493 	}
494 
495 	ret = adf_dev_init(accel_dev);
496 	if (unlikely(ret))
497 		goto out;
498 
499 	ret = adf_dev_start(accel_dev);
500 
501 out:
502 	mutex_unlock(&accel_dev->state_lock);
503 	return ret;
504 }
505 EXPORT_SYMBOL_GPL(adf_dev_up);
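
/*
 * Illustrative sketch only: a device-specific driver typically brings the
 * device up after probing it, asking for a fresh configuration to be
 * generated, and takes it down again on removal.  The probe/remove context
 * is hypothetical; only the two calls and their bool arguments match the
 * definitions above.
 *
 *	ret = adf_dev_up(accel_dev, true);
 *	if (ret)
 *		return ret;
 *	...
 *	adf_dev_down(accel_dev, false);
 */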
506 
507 int adf_dev_restart(struct adf_accel_dev *accel_dev)
508 {
509 	int ret = 0;
510 
511 	if (!accel_dev)
512 		return -EFAULT;
513 
514 	adf_dev_down(accel_dev, false);
515 
516 	ret = adf_dev_up(accel_dev, false);
517 	/* If the device is already up, return success */
518 	if (ret == -EALREADY)
519 		return 0;
520 
521 	return ret;
522 }
523 EXPORT_SYMBOL_GPL(adf_dev_restart);
524