// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/mutex.h>
4 #include <linux/list.h>
5 #include <linux/bitops.h>
6 #include <linux/delay.h>
7 #include "adf_accel_devices.h"
8 #include "adf_cfg.h"
9 #include "adf_common_drv.h"
10 #include "adf_dbgfs.h"
11 #include "adf_heartbeat.h"
12 #include "adf_rl.h"
13 #include "adf_sysfs_ras_counters.h"
14 #include "adf_telemetry.h"
15
/* Registered subservice handlers, protected by service_lock. */
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
18
adf_service_add(struct service_hndl * service)19 static void adf_service_add(struct service_hndl *service)
20 {
21 mutex_lock(&service_lock);
22 list_add(&service->list, &service_table);
23 mutex_unlock(&service_lock);
24 }
25
adf_service_register(struct service_hndl * service)26 int adf_service_register(struct service_hndl *service)
27 {
28 memset(service->init_status, 0, sizeof(service->init_status));
29 memset(service->start_status, 0, sizeof(service->start_status));
30 adf_service_add(service);
31 return 0;
32 }
33
adf_service_remove(struct service_hndl * service)34 static void adf_service_remove(struct service_hndl *service)
35 {
36 mutex_lock(&service_lock);
37 list_del(&service->list);
38 mutex_unlock(&service_lock);
39 }
40
adf_service_unregister(struct service_hndl * service)41 int adf_service_unregister(struct service_hndl *service)
42 {
43 int i;
44
45 for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
46 if (service->init_status[i] || service->start_status[i]) {
47 pr_err("QAT: Could not remove active service\n");
48 return -EFAULT;
49 }
50 }
51 adf_service_remove(service);
52 return 0;
53 }
54
/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * Return: 0 on success, error code otherwise.
 */
adf_dev_init(struct adf_accel_dev * accel_dev)64 static int adf_dev_init(struct adf_accel_dev *accel_dev)
65 {
66 struct service_hndl *service;
67 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
68 int ret;
69
70 if (!hw_data) {
71 dev_err(&GET_DEV(accel_dev),
72 "Failed to init device - hw_data not set\n");
73 return -EFAULT;
74 }
75
76 if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
77 !accel_dev->is_vf) {
78 dev_err(&GET_DEV(accel_dev), "Device not configured\n");
79 return -EFAULT;
80 }
81
82 if (adf_init_etr_data(accel_dev)) {
83 dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
84 return -EFAULT;
85 }
86
87 if (hw_data->init_device && hw_data->init_device(accel_dev)) {
88 dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
89 return -EFAULT;
90 }
91
92 if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
93 dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
94 return -EFAULT;
95 }
96
97 if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
98 dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
99 return -EFAULT;
100 }
101
102 if (hw_data->get_ring_to_svc_map)
103 hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev);
104
105 if (adf_ae_init(accel_dev)) {
106 dev_err(&GET_DEV(accel_dev),
107 "Failed to initialise Acceleration Engine\n");
108 return -EFAULT;
109 }
110 set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
111
112 if (adf_ae_fw_load(accel_dev)) {
113 dev_err(&GET_DEV(accel_dev),
114 "Failed to load acceleration FW\n");
115 return -EFAULT;
116 }
117 set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
118
119 if (hw_data->alloc_irq(accel_dev)) {
120 dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
121 return -EFAULT;
122 }
123 set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
124
125 if (hw_data->ras_ops.enable_ras_errors)
126 hw_data->ras_ops.enable_ras_errors(accel_dev);
127
128 hw_data->enable_ints(accel_dev);
129 hw_data->enable_error_correction(accel_dev);
130
131 ret = hw_data->pfvf_ops.enable_comms(accel_dev);
132 if (ret)
133 return ret;
134
135 if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
136 accel_dev->is_vf) {
137 if (qat_crypto_vf_dev_config(accel_dev))
138 return -EFAULT;
139 }
140
141 adf_heartbeat_init(accel_dev);
142 ret = adf_rl_init(accel_dev);
143 if (ret && ret != -EOPNOTSUPP)
144 return ret;
145
146 ret = adf_tl_init(accel_dev);
147 if (ret && ret != -EOPNOTSUPP)
148 return ret;
149
150 /*
151 * Subservice initialisation is divided into two stages: init and start.
152 * This is to facilitate any ordering dependencies between services
153 * prior to starting any of the accelerators.
154 */
155 list_for_each_entry(service, &service_table, list) {
156 if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
157 dev_err(&GET_DEV(accel_dev),
158 "Failed to initialise service %s\n",
159 service->name);
160 return -EFAULT;
161 }
162 set_bit(accel_dev->accel_id, service->init_status);
163 }
164
165 return 0;
166 }
167
/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
adf_dev_start(struct adf_accel_dev * accel_dev)178 static int adf_dev_start(struct adf_accel_dev *accel_dev)
179 {
180 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
181 struct service_hndl *service;
182 int ret;
183
184 set_bit(ADF_STATUS_STARTING, &accel_dev->status);
185
186 if (adf_ae_start(accel_dev)) {
187 dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
188 return -EFAULT;
189 }
190 set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
191
192 if (hw_data->send_admin_init(accel_dev)) {
193 dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
194 return -EFAULT;
195 }
196
197 if (hw_data->measure_clock) {
198 ret = hw_data->measure_clock(accel_dev);
199 if (ret) {
200 dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n");
201 return ret;
202 }
203 }
204
205 /* Set ssm watch dog timer */
206 if (hw_data->set_ssm_wdtimer)
207 hw_data->set_ssm_wdtimer(accel_dev);
208
209 /* Enable Power Management */
210 if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
211 dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
212 return -EFAULT;
213 }
214
215 if (hw_data->start_timer) {
216 ret = hw_data->start_timer(accel_dev);
217 if (ret) {
218 dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
219 return ret;
220 }
221 }
222
223 adf_heartbeat_start(accel_dev);
224 ret = adf_rl_start(accel_dev);
225 if (ret && ret != -EOPNOTSUPP)
226 return ret;
227
228 ret = adf_tl_start(accel_dev);
229 if (ret && ret != -EOPNOTSUPP)
230 return ret;
231
232 list_for_each_entry(service, &service_table, list) {
233 if (service->event_hld(accel_dev, ADF_EVENT_START)) {
234 dev_err(&GET_DEV(accel_dev),
235 "Failed to start service %s\n",
236 service->name);
237 return -EFAULT;
238 }
239 set_bit(accel_dev->accel_id, service->start_status);
240 }
241
242 clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
243 set_bit(ADF_STATUS_STARTED, &accel_dev->status);
244
245 if (!list_empty(&accel_dev->crypto_list) &&
246 (qat_algs_register() || qat_asym_algs_register())) {
247 dev_err(&GET_DEV(accel_dev),
248 "Failed to register crypto algs\n");
249 set_bit(ADF_STATUS_STARTING, &accel_dev->status);
250 clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
251 return -EFAULT;
252 }
253 set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
254
255 if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
256 dev_err(&GET_DEV(accel_dev),
257 "Failed to register compression algs\n");
258 set_bit(ADF_STATUS_STARTING, &accel_dev->status);
259 clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
260 return -EFAULT;
261 }
262 set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
263
264 adf_dbgfs_add(accel_dev);
265 adf_sysfs_start_ras(accel_dev);
266
267 return 0;
268 }
269
/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
adf_dev_stop(struct adf_accel_dev * accel_dev)280 static void adf_dev_stop(struct adf_accel_dev *accel_dev)
281 {
282 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
283 struct service_hndl *service;
284 bool wait = false;
285 int ret;
286
287 if (!adf_dev_started(accel_dev) &&
288 !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
289 return;
290
291 adf_tl_stop(accel_dev);
292 adf_rl_stop(accel_dev);
293 adf_dbgfs_rm(accel_dev);
294 adf_sysfs_stop_ras(accel_dev);
295
296 clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
297 clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
298
299 if (!list_empty(&accel_dev->crypto_list) &&
300 test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
301 qat_algs_unregister();
302 qat_asym_algs_unregister();
303 }
304 clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
305
306 if (!list_empty(&accel_dev->compression_list) &&
307 test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
308 qat_comp_algs_unregister();
309 clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
310
311 list_for_each_entry(service, &service_table, list) {
312 if (!test_bit(accel_dev->accel_id, service->start_status))
313 continue;
314 ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
315 if (!ret) {
316 clear_bit(accel_dev->accel_id, service->start_status);
317 } else if (ret == -EAGAIN) {
318 wait = true;
319 clear_bit(accel_dev->accel_id, service->start_status);
320 }
321 }
322
323 if (hw_data->stop_timer)
324 hw_data->stop_timer(accel_dev);
325
326 hw_data->disable_iov(accel_dev);
327
328 if (wait)
329 msleep(100);
330
331 if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
332 if (adf_ae_stop(accel_dev))
333 dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
334 else
335 clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
336 }
337 }
338
/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
adf_dev_shutdown(struct adf_accel_dev * accel_dev)346 static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
347 {
348 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
349 struct service_hndl *service;
350
351 if (!hw_data) {
352 dev_err(&GET_DEV(accel_dev),
353 "QAT: Failed to shutdown device - hw_data not set\n");
354 return;
355 }
356
357 if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
358 adf_ae_fw_release(accel_dev);
359 clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
360 }
361
362 if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
363 if (adf_ae_shutdown(accel_dev))
364 dev_err(&GET_DEV(accel_dev),
365 "Failed to shutdown Accel Engine\n");
366 else
367 clear_bit(ADF_STATUS_AE_INITIALISED,
368 &accel_dev->status);
369 }
370
371 list_for_each_entry(service, &service_table, list) {
372 if (!test_bit(accel_dev->accel_id, service->init_status))
373 continue;
374 if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
375 dev_err(&GET_DEV(accel_dev),
376 "Failed to shutdown service %s\n",
377 service->name);
378 else
379 clear_bit(accel_dev->accel_id, service->init_status);
380 }
381
382 adf_rl_exit(accel_dev);
383
384 if (hw_data->ras_ops.disable_ras_errors)
385 hw_data->ras_ops.disable_ras_errors(accel_dev);
386
387 adf_heartbeat_shutdown(accel_dev);
388
389 adf_tl_shutdown(accel_dev);
390
391 if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
392 hw_data->free_irq(accel_dev);
393 clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
394 }
395
396 /* If not restarting, delete all cfg sections except for GENERAL */
397 if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
398 adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
399
400 if (hw_data->exit_arb)
401 hw_data->exit_arb(accel_dev);
402
403 if (hw_data->exit_admin_comms)
404 hw_data->exit_admin_comms(accel_dev);
405
406 adf_cleanup_etr_data(accel_dev);
407 adf_dev_restore(accel_dev);
408 }
409
adf_dev_restarting_notify(struct adf_accel_dev * accel_dev)410 int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
411 {
412 struct service_hndl *service;
413
414 list_for_each_entry(service, &service_table, list) {
415 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
416 dev_err(&GET_DEV(accel_dev),
417 "Failed to restart service %s.\n",
418 service->name);
419 }
420 return 0;
421 }
422
adf_dev_restarted_notify(struct adf_accel_dev * accel_dev)423 int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
424 {
425 struct service_hndl *service;
426
427 list_for_each_entry(service, &service_table, list) {
428 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
429 dev_err(&GET_DEV(accel_dev),
430 "Failed to restart service %s.\n",
431 service->name);
432 }
433 return 0;
434 }
435
adf_error_notifier(struct adf_accel_dev * accel_dev)436 void adf_error_notifier(struct adf_accel_dev *accel_dev)
437 {
438 struct service_hndl *service;
439
440 list_for_each_entry(service, &service_table, list) {
441 if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR))
442 dev_err(&GET_DEV(accel_dev),
443 "Failed to send error event to %s.\n",
444 service->name);
445 }
446 }
447
adf_dev_down(struct adf_accel_dev * accel_dev)448 int adf_dev_down(struct adf_accel_dev *accel_dev)
449 {
450 int ret = 0;
451
452 if (!accel_dev)
453 return -EINVAL;
454
455 mutex_lock(&accel_dev->state_lock);
456
457 adf_dev_stop(accel_dev);
458 adf_dev_shutdown(accel_dev);
459
460 mutex_unlock(&accel_dev->state_lock);
461 return ret;
462 }
463 EXPORT_SYMBOL_GPL(adf_dev_down);
464
/*
 * Bring a device up under its state lock: optionally (re)configure it,
 * then init and start. Returns -EALREADY if the device is already started.
 */
int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
{
	int ret = 0;

	if (!accel_dev)
		return -EINVAL;

	mutex_lock(&accel_dev->state_lock);

	if (adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
			 accel_dev->accel_id);
		ret = -EALREADY;
		goto out;
	}

	if (config && GET_HW_DATA(accel_dev)->dev_config) {
		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
		if (unlikely(ret))
			goto out;
	}

	ret = adf_dev_init(accel_dev);
	if (unlikely(ret))
		goto out;

	ret = adf_dev_start(accel_dev);

out:
	mutex_unlock(&accel_dev->state_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_up);
498
adf_dev_restart(struct adf_accel_dev * accel_dev)499 int adf_dev_restart(struct adf_accel_dev *accel_dev)
500 {
501 int ret = 0;
502
503 if (!accel_dev)
504 return -EFAULT;
505
506 adf_dev_down(accel_dev);
507
508 ret = adf_dev_up(accel_dev, false);
509 /* if device is already up return success*/
510 if (ret == -EALREADY)
511 return 0;
512
513 return ret;
514 }
515 EXPORT_SYMBOL_GPL(adf_dev_restart);
516