/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */

#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_DEV_PROCESSES_NAME "qat_dev_processes"
#define ADF_DEV_STATE_NAME "qat_dev_state"
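
/*
 * This file implements two character devices:
 *
 *   /dev/qat_dev_processes - lets a user-space process claim a per-process
 *       configuration section name on a started device by writing its base
 *       name and reading back the assigned name.
 *
 *   /dev/qat_dev_state - delivers struct adf_state event records
 *       (restarting, restarted, stop, error, ...) to user space via read(2),
 *       poll/select and kqueue.
 */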

#define ADF_STATE_CALLOUT_TIME 10

static const char *mtx_name = "state_mtx";
static const char *mtx_callout_name = "callout_mtx";

static d_open_t adf_processes_open;
static void adf_processes_release(void *data);
static d_read_t adf_processes_read;
static d_write_t adf_processes_write;

static d_open_t adf_state_open;
static void adf_state_release(void *data);
static d_read_t adf_state_read;
static int adf_state_kqfilter(struct cdev *dev, struct knote *kn);
static int adf_state_kqread_event(struct knote *kn, long hint);
static void adf_state_kqread_detach(struct knote *kn);

static struct callout callout;
static struct mtx mtx;
static struct mtx callout_mtx;
static struct service_hndl adf_state_hndl;
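
/*
 * "mtx" protects proc_events_head and every per-open state queue and knote
 * list; "callout_mtx" is the callout's own lock.  The callout periodically
 * re-notifies readers that still have unconsumed events queued.
 */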

struct entry_proc_events {
        struct adf_state_priv_data *proc_events;

        SLIST_ENTRY(entry_proc_events) entries_proc_events;
};

struct entry_state {
        struct adf_state state;

        STAILQ_ENTRY(entry_state) entries_state;
};

SLIST_HEAD(proc_events_head, entry_proc_events);
STAILQ_HEAD(state_head, entry_state);

static struct proc_events_head proc_events_head;

struct adf_processes_priv_data {
        char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
        int read_flag;
        struct list_head list;
};

struct adf_state_priv_data {
        struct cdev *cdev;
        struct selinfo rsel;
        struct state_head state_head;
};

static struct cdevsw adf_processes_cdevsw = {
        .d_version = D_VERSION,
        .d_open = adf_processes_open,
        .d_read = adf_processes_read,
        .d_write = adf_processes_write,
        .d_name = ADF_DEV_PROCESSES_NAME,
};

static struct cdevsw adf_state_cdevsw = {
        .d_version = D_VERSION,
        .d_open = adf_state_open,
        .d_read = adf_state_read,
        .d_kqfilter = adf_state_kqfilter,
        .d_name = ADF_DEV_STATE_NAME,
};

static const struct filterops adf_state_read_filterops = {
        .f_isfd = 1,
        .f_attach = NULL,
        .f_detach = adf_state_kqread_detach,
        .f_event = adf_state_kqread_event,
};

static struct cdev *adf_processes_dev;
static struct cdev *adf_state_dev;

static LINUX_LIST_HEAD(processes_list);

struct sx processes_list_sema;
SX_SYSINIT(processes_list_sema, &processes_list_sema, "adf proc list");
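
/*
 * processes_list holds one adf_processes_priv_data per process that has
 * successfully claimed a section name; processes_list_sema serializes the
 * name search in adf_processes_write() against adf_processes_release().
 */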

static void
adf_chr_drv_destroy(void)
{
        destroy_dev(adf_processes_dev);
}

static int
adf_chr_drv_create(void)
{
        adf_processes_dev = make_dev(&adf_processes_cdevsw,
            0,
            UID_ROOT,
            GID_WHEEL,
            0600,
            ADF_DEV_PROCESSES_NAME);
        if (adf_processes_dev == NULL) {
                printf("QAT: failed to create device\n");
                goto err_cdev_del;
        }
        return 0;
err_cdev_del:
        return EFAULT;
}

static int
adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
        int i = 0, devices = 0;
        struct adf_accel_dev *accel_dev = NULL;
        struct adf_processes_priv_data *prv_data = NULL;
        int error = 0;

        for (i = 0; i < ADF_MAX_DEVICES; i++) {
                accel_dev = adf_devmgr_get_dev_by_id(i);
                if (!accel_dev)
                        continue;
                if (!adf_dev_started(accel_dev))
                        continue;
                devices++;
        }
        if (!devices) {
                printf("QAT: No active devices found.\n");
                return ENXIO;
        }
        prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
        INIT_LIST_HEAD(&prv_data->list);
        error = devfs_set_cdevpriv(prv_data, adf_processes_release);
        if (error) {
                free(prv_data, M_QAT);
                return error;
        }

        return 0;
}

static int
adf_get_first_started_dev(void)
{
        int i = 0;
        struct adf_accel_dev *accel_dev = NULL;

        for (i = 0; i < ADF_MAX_DEVICES; i++) {
                accel_dev = adf_devmgr_get_dev_by_id(i);
                if (!accel_dev)
                        continue;
                if (adf_dev_started(accel_dev))
                        return i;
        }

        return -1;
}

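/*
 * Process-name handshake for /dev/qat_dev_processes: a process writes its
 * base section name once; the handler below scans the started devices for
 * the first "<base><ADF_INTERNAL_USERSPACE_SEC_SUFF><n>" section that exists
 * in the device configuration and is not already claimed on processes_list,
 * stores it in the per-open private data and sets read_flag so that a
 * following read(2) returns the assigned name.  If every candidate is taken
 * the write returns 1 and the caller is expected to retry.
 *
 * A minimal user-space sketch of the handshake (illustrative only; error
 * handling omitted, "SSL" is just an example base name and the real buffer
 * bound is ADF_CFG_MAX_SECTION_LEN_IN_BYTES):
 *
 *    char name[64] = "SSL";
 *    int fd = open("/dev/qat_dev_processes", O_RDWR);
 *
 *    write(fd, name, strlen(name));  // request a section for "SSL"
 *    read(fd, name, sizeof(name));   // returns the assigned section name
 *    ...                             // name stays reserved until close(fd)
 *    close(fd);
 */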
static int
adf_processes_write(struct cdev *dev, struct uio *uio, int ioflag)
{
        struct adf_processes_priv_data *prv_data = NULL;
        struct adf_processes_priv_data *pdata = NULL;
        int dev_num = 0, pr_num = 0;
        struct list_head *lpos = NULL;
        char usr_name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES] = { 0 };
        struct adf_accel_dev *accel_dev = NULL;
        struct adf_cfg_section *section_ptr = NULL;
        bool pr_name_available = 1;
        uint32_t num_accel_devs = 0;
        int error = 0;
        ssize_t count;
        int dev_id;

        error = devfs_get_cdevpriv((void **)&prv_data);
        if (error) {
                printf("QAT: invalid file descriptor\n");
                return error;
        }

        if (prv_data->read_flag == 1) {
                printf("QAT: can only write once\n");
                return EBADF;
        }
        count = uio->uio_resid;
        if ((count <= 0) || (count > ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
                printf("QAT: wrong size %d\n", (int)count);
                return EIO;
        }

        error = uiomove(usr_name, count, uio);
        if (error) {
                printf("QAT: can't copy data\n");
                return error;
        }

        /* Lock other processes and try to find out the process name */
        if (sx_xlock_sig(&processes_list_sema)) {
                printf("QAT: can't acquire process info lock\n");
                return EBADF;
        }

        dev_id = adf_get_first_started_dev();
        if (-1 == dev_id) {
                pr_err("QAT: could not find started device\n");
                sx_xunlock(&processes_list_sema);
                return -EIO;
        }

        accel_dev = adf_devmgr_get_dev_by_id(dev_id);
        if (!accel_dev) {
                pr_err("QAT: could not find started device\n");
                sx_xunlock(&processes_list_sema);
                return -EIO;
        }

        /* If there is nothing there then take the first name and return */
        if (list_empty(&processes_list)) {
                snprintf(prv_data->name,
                    ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
                    "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
                    usr_name,
                    0);
                list_add(&prv_data->list, &processes_list);
                sx_xunlock(&processes_list_sema);
                prv_data->read_flag = 1;
                return 0;
        }

        /* If there are processes running then search for a first free name */
        adf_devmgr_get_num_dev(&num_accel_devs);
        for (dev_num = 0; dev_num < num_accel_devs; dev_num++) {
                accel_dev = adf_devmgr_get_dev_by_id(dev_num);
                if (!accel_dev)
                        continue;

                if (!adf_dev_started(accel_dev))
                        continue; /* to next device */

                for (pr_num = 0; pr_num < GET_MAX_PROCESSES(accel_dev);
                    pr_num++) {
                        snprintf(prv_data->name,
                            ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
                            "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
                            usr_name,
                            pr_num);
                        pr_name_available = 1;
                        /* Figure out if section exists in the config table */
                        section_ptr =
                            adf_cfg_sec_find(accel_dev, prv_data->name);
                        if (NULL == section_ptr) {
                                /* This section name doesn't exist */
                                pr_name_available = 0;
                                /* As process_num enumerates from 0, once we
                                 * get to one which doesn't exist no further
                                 * ones will exist. On to next device
                                 */
                                break;
                        }
                        /* Figure out if it's been taken already */
                        list_for_each(lpos, &processes_list)
                        {
                                pdata = list_entry(lpos,
                                    struct adf_processes_priv_data,
                                    list);
                                if (!strncmp(pdata->name,
                                        prv_data->name,
                                        ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
                                        pr_name_available = 0;
                                        break;
                                }
                        }
                        if (pr_name_available)
                                break;
                }
                if (pr_name_available)
                        break;
        }
        /*
         * If we have a valid name that is not on
         * the list take it and add to the list
         */
        if (pr_name_available) {
                list_add(&prv_data->list, &processes_list);
                sx_xunlock(&processes_list_sema);
                prv_data->read_flag = 1;
                return 0;
        }
        /* If not then the process needs to wait */
        sx_xunlock(&processes_list_sema);
        explicit_bzero(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES);
        prv_data->read_flag = 0;
        return 1;
}

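/*
 * read(2) on qat_dev_processes: return the section name assigned by a
 * successful write, or EIO if no name has been assigned yet.
 */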
static int
adf_processes_read(struct cdev *dev, struct uio *uio, int ioflag)
{
        struct adf_processes_priv_data *prv_data = NULL;
        int error = 0;

        error = devfs_get_cdevpriv((void **)&prv_data);
        if (error) {
                printf("QAT: invalid file descriptor\n");
                return error;
        }

        /*
         * If there is a name that the process can use then give it
         * to the process.
         */
        if (prv_data->read_flag) {
                error = uiomove(prv_data->name,
                    strnlen(prv_data->name,
                        ADF_CFG_MAX_SECTION_LEN_IN_BYTES),
                    uio);
                if (error) {
                        printf("QAT: failed to copy data to user\n");
                        return error;
                }
                return 0;
        }

        return EIO;
}

static void
adf_processes_release(void *data)
{
        struct adf_processes_priv_data *prv_data = NULL;

        prv_data = (struct adf_processes_priv_data *)data;
        sx_xlock(&processes_list_sema);
        list_del(&prv_data->list);
        sx_xunlock(&processes_list_sema);
        free(prv_data, M_QAT);
}

int
adf_processes_dev_register(void)
{
        return adf_chr_drv_create();
}

void
adf_processes_dev_unregister(void)
{
        adf_chr_drv_destroy();
}

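/*
 * Callout handler: wake up (wakeup/selwakeup/KNOTE) every qat_dev_state
 * reader that still has queued events and re-arm the callout while any
 * events remain unconsumed.
 */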
static void
adf_state_callout_notify_ev(void *arg)
{
        int notified = 0;
        struct adf_state_priv_data *priv = NULL;
        struct entry_proc_events *proc_events = NULL;

        SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
                if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
                        notified = 1;
                        priv = proc_events->proc_events;
                        wakeup(priv);
                        selwakeup(&priv->rsel);
                        KNOTE_UNLOCKED(&priv->rsel.si_note, 0);
                }
        }
        if (notified)
                callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
}

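/*
 * Queue an event record for the given device on every open qat_dev_state
 * instance.  An ADF_EVENT_STOP is followed by a synthetic ADF_EVENT_SHUTDOWN
 * record, and the callout is (re)armed so the readers get woken up.
 */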
static void
adf_state_set(int dev, enum adf_event event)
{
        struct adf_accel_dev *accel_dev = NULL;
        struct state_head *head = NULL;
        struct entry_proc_events *proc_events = NULL;
        struct entry_state *state = NULL;

        accel_dev = adf_devmgr_get_dev_by_id(dev);
        if (!accel_dev)
                return;
        mtx_lock(&mtx);
        SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
                state = NULL;
                head = &proc_events->proc_events->state_head;
                state = malloc(sizeof(struct entry_state),
                    M_QAT,
                    M_NOWAIT | M_ZERO);
                if (!state)
                        continue;
                state->state.dev_state = event;
                state->state.dev_id = dev;
                STAILQ_INSERT_TAIL(head, state, entries_state);
                if (event == ADF_EVENT_STOP) {
                        state = NULL;
                        state = malloc(sizeof(struct entry_state),
                            M_QAT,
                            M_NOWAIT | M_ZERO);
                        if (!state)
                                continue;
                        state->state.dev_state = ADF_EVENT_SHUTDOWN;
                        state->state.dev_id = dev;
                        STAILQ_INSERT_TAIL(head, state, entries_state);
                }
        }
        mtx_unlock(&mtx);
        callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
}

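/*
 * ADF service-framework event hook: INIT, START and SHUTDOWN are ignored
 * here; the remaining events are forwarded to the qat_dev_state readers via
 * adf_state_set().
 */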
static int
adf_state_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event)
{
        int ret = 0;

#if defined(QAT_UIO) && defined(QAT_DBG)
        if (event > ADF_EVENT_DBG_SHUTDOWN)
                return -EINVAL;
#else
        if (event > ADF_EVENT_ERROR)
                return -EINVAL;
#endif /* defined(QAT_UIO) && defined(QAT_DBG) */

        switch (event) {
        case ADF_EVENT_INIT:
                return ret;
        case ADF_EVENT_SHUTDOWN:
                return ret;
        case ADF_EVENT_RESTARTING:
                break;
        case ADF_EVENT_RESTARTED:
                break;
        case ADF_EVENT_START:
                return ret;
        case ADF_EVENT_STOP:
                break;
        case ADF_EVENT_ERROR:
                break;
#if defined(QAT_UIO) && defined(QAT_DBG)
        case ADF_EVENT_PROC_CRASH:
                break;
        case ADF_EVENT_MANUAL_DUMP:
                break;
        case ADF_EVENT_SLICE_HANG:
                break;
        case ADF_EVENT_DBG_SHUTDOWN:
                break;
#endif /* defined(QAT_UIO) && defined(QAT_DBG) */
        default:
                return -1;
        }

        adf_state_set(accel_dev->accel_id, event);

        return 0;
}

static int
adf_state_kqfilter(struct cdev *dev, struct knote *kn)
{
        struct adf_state_priv_data *priv;

        mtx_lock(&mtx);
        priv = dev->si_drv1;
        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &adf_state_read_filterops;
                kn->kn_hook = priv;
                knlist_add(&priv->rsel.si_note, kn, 1);
                mtx_unlock(&mtx);
                return 0;
        default:
                mtx_unlock(&mtx);
                return -EINVAL;
        }
}

static int
adf_state_kqread_event(struct knote *kn, long hint)
{
        return 1;
}

static void
adf_state_kqread_detach(struct knote *kn)
{
        struct adf_state_priv_data *priv = NULL;

        mtx_lock(&mtx);
        if (!kn) {
                mtx_unlock(&mtx);
                return;
        }
        priv = kn->kn_hook;
        if (!priv) {
                mtx_unlock(&mtx);
                return;
        }
        knlist_remove(&priv->rsel.si_note, kn, 1);
        mtx_unlock(&mtx);
}

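/*
 * Create /dev/qat_dev_state, set up the notification state (list heads,
 * mutexes, callout), register the event handler with the ADF service
 * framework and arm the periodic wakeup callout.
 */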
void
adf_state_init(void)
{
        adf_state_dev = make_dev(&adf_state_cdevsw,
            0,
            UID_ROOT,
            GID_WHEEL,
            0600,
            "%s",
            ADF_DEV_STATE_NAME);
        SLIST_INIT(&proc_events_head);
        mtx_init(&mtx, mtx_name, NULL, MTX_DEF);
        mtx_init(&callout_mtx, mtx_callout_name, NULL, MTX_DEF);
        callout_init_mtx(&callout, &callout_mtx, 0);
        explicit_bzero(&adf_state_hndl, sizeof(adf_state_hndl));
        adf_state_hndl.event_hld = adf_state_event_handler;
        adf_state_hndl.name = "adf_state_event_handler";
        adf_service_register(&adf_state_hndl);
        callout_reset(&callout,
            ADF_STATE_CALLOUT_TIME,
            adf_state_callout_notify_ev,
            NULL);
}

void
adf_state_destroy(void)
{
        struct entry_proc_events *proc_events = NULL;

        adf_service_unregister(&adf_state_hndl);
        mtx_lock(&callout_mtx);
        callout_stop(&callout);
        mtx_unlock(&callout_mtx);
        mtx_destroy(&callout_mtx);
        mtx_lock(&mtx);
        while (!SLIST_EMPTY(&proc_events_head)) {
                proc_events = SLIST_FIRST(&proc_events_head);
                SLIST_REMOVE_HEAD(&proc_events_head, entries_proc_events);
                free(proc_events, M_QAT);
        }
        mtx_unlock(&mtx);
        mtx_destroy(&mtx);
        destroy_dev(adf_state_dev);
}

static int
adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
        struct adf_state_priv_data *prv_data = NULL;
        struct entry_proc_events *entry_proc_events = NULL;
        int ret = 0;

        prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
        entry_proc_events =
            malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO);
        mtx_lock(&mtx);
        prv_data->cdev = dev;
        prv_data->cdev->si_drv1 = prv_data;
        knlist_init_mtx(&prv_data->rsel.si_note, &mtx);
        STAILQ_INIT(&prv_data->state_head);
        entry_proc_events->proc_events = prv_data;
        SLIST_INSERT_HEAD(&proc_events_head,
            entry_proc_events,
            entries_proc_events);
        mtx_unlock(&mtx);
        ret = devfs_set_cdevpriv(prv_data, adf_state_release);
        if (ret) {
                SLIST_REMOVE(&proc_events_head,
                    entry_proc_events,
                    entry_proc_events,
                    entries_proc_events);
                free(entry_proc_events, M_QAT);
                free(prv_data, M_QAT);
        }
        callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
        return ret;
}

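/*
 * read(2) on qat_dev_state: copy out the oldest queued struct adf_state for
 * this open instance (returning no data when nothing is pending), drop it on
 * success, then wake any instances that still have events queued and re-arm
 * the callout.
 */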
static int
adf_state_read(struct cdev *dev, struct uio *uio, int ioflag)
{
        int ret = 0;
        struct adf_state_priv_data *prv_data = NULL;
        struct state_head *state_head = NULL;
        struct entry_state *entry_state = NULL;
        struct adf_state *state = NULL;
        struct entry_proc_events *proc_events = NULL;

        mtx_lock(&mtx);
        ret = devfs_get_cdevpriv((void **)&prv_data);
        if (ret) {
                mtx_unlock(&mtx);
                return 0;
        }
        state_head = &prv_data->state_head;
        if (STAILQ_EMPTY(state_head)) {
                mtx_unlock(&mtx);
                return 0;
        }
        entry_state = STAILQ_FIRST(state_head);
        state = &entry_state->state;
        ret = uiomove(state, sizeof(struct adf_state), uio);
        if (!ret && !STAILQ_EMPTY(state_head)) {
                STAILQ_REMOVE_HEAD(state_head, entries_state);
                free(entry_state, M_QAT);
        }
        SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
                if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
                        prv_data = proc_events->proc_events;
                        wakeup(prv_data);
                        selwakeup(&prv_data->rsel);
                        KNOTE_UNLOCKED(&prv_data->rsel.si_note, 0);
                }
        }
        mtx_unlock(&mtx);
        callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
        return ret;
}

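/*
 * cdevpriv destructor for qat_dev_state: tear down the knote list and
 * selinfo, free any unread state entries and unlink this instance from
 * proc_events_head.
 */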
static void
adf_state_release(void *data)
{
        struct adf_state_priv_data *prv_data = NULL;
        struct entry_state *entry_state = NULL;
        struct entry_proc_events *entry_proc_events = NULL;
        struct entry_proc_events *tmp = NULL;

        mtx_lock(&mtx);
        prv_data = (struct adf_state_priv_data *)data;
        knlist_delete(&prv_data->rsel.si_note, curthread, 1);
        knlist_destroy(&prv_data->rsel.si_note);
        seldrain(&prv_data->rsel);
        while (!STAILQ_EMPTY(&prv_data->state_head)) {
                entry_state = STAILQ_FIRST(&prv_data->state_head);
                STAILQ_REMOVE_HEAD(&prv_data->state_head, entries_state);
                free(entry_state, M_QAT);
        }
        SLIST_FOREACH_SAFE (entry_proc_events,
            &proc_events_head,
            entries_proc_events,
            tmp) {
                if (entry_proc_events->proc_events == prv_data) {
                        SLIST_REMOVE(&proc_events_head,
                            entry_proc_events,
                            entry_proc_events,
                            entries_proc_events);
                        free(entry_proc_events, M_QAT);
                }
        }
        free(prv_data, M_QAT);
        mtx_unlock(&mtx);
}