1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright IBM Corp. 2001, 2018
4 * Author(s): Robert Burroughs
5 * Eric Rossman (edrossma@us.ibm.com)
6 * Cornelia Huck <cornelia.huck@de.ibm.com>
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
13 */
14
15 #define pr_fmt(fmt) "zcrypt: " fmt
16
17 #include <linux/export.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
21 #include <linux/miscdevice.h>
22 #include <linux/fs.h>
23 #include <linux/slab.h>
24 #include <linux/atomic.h>
25 #include <linux/uaccess.h>
26 #include <linux/hw_random.h>
27 #include <linux/debugfs.h>
28 #include <linux/cdev.h>
29 #include <linux/ctype.h>
30 #include <linux/capability.h>
31 #include <asm/debug.h>
32
33 #define CREATE_TRACE_POINTS
34 #include <asm/trace/zcrypt.h>
35
36 #include "zcrypt_api.h"
37 #include "zcrypt_debug.h"
38
39 #include "zcrypt_msgtype6.h"
40 #include "zcrypt_msgtype50.h"
41 #include "zcrypt_ccamisc.h"
42 #include "zcrypt_ep11misc.h"
43
/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/* Minimal number of preallocated CCA/EP11 request/reply mempool items. */
unsigned int zcrypt_mempool_threshold = 5;
module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0400);
MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

/* Lock protecting traversal/modification of the zcrypt card list. */
DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);

/* Number of currently open file handles on the zcrypt device nodes. */
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

/* List of registered msgtype ops, see zcrypt_msgtype_register(). */
static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
71
72 /*
73 * Process a rescan of the transport layer.
74 * Runs a synchronous AP bus rescan.
75 * Returns true if something has changed (for example the
76 * bus scan has found and build up new devices) and it is
77 * worth to do a retry. Otherwise false is returned meaning
78 * no changes on the AP bus level.
79 */
static inline bool zcrypt_process_rescan(void)
{
	/* ap_bus_force_rescan() reports whether the bus scan saw changes */
	return ap_bus_force_rescan();
}
84
/* Register a msgtype ops vector with the zcrypt API. */
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}
89
/* Remove a msgtype ops vector from the zcrypt API again. */
void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}
94
zcrypt_msgtype(unsigned char * name,int variant)95 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
96 {
97 struct zcrypt_ops *zops;
98
99 list_for_each_entry(zops, &zcrypt_ops_list, list)
100 if (zops->variant == variant &&
101 (!strncmp(zops->name, name, sizeof(zops->name))))
102 return zops;
103 return NULL;
104 }
105 EXPORT_SYMBOL(zcrypt_msgtype);
106
/*
 * Multi device nodes extension functions.
 */

struct zcdn_device;

static void zcdn_device_release(struct device *dev);
/* Device class for the additional zcrypt device nodes ("zcdn" nodes). */
static const struct class zcrypt_class = {
	.name = ZCRYPT_NAME,
	.dev_release = zcdn_device_release,
};
/* Base dev_t of the zcrypt char device minor number range. */
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

/* A zcdn device: a char device node with its own ap permission masks. */
struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

/* Max length of a zcdn device node name (including trailing zero). */
#define ZCDN_MAX_NAME 32

static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);
132
133 /*
134 * Find zcdn device by name.
135 * Returns reference to the zcdn device which needs to be released
136 * with put_device() after use.
137 */
find_zcdndev_by_name(const char * name)138 static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
139 {
140 struct device *dev = class_find_device_by_name(&zcrypt_class, name);
141
142 return dev ? to_zcdn_dev(dev) : NULL;
143 }
144
145 /*
146 * Find zcdn device by devt value.
147 * Returns reference to the zcdn device which needs to be released
148 * with put_device() after use.
149 */
/*
 * Look up a zcdn device by devt value. On success the returned device
 * carries a reference which the caller must drop with put_device().
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev;

	dev = class_find_device_by_devt(&zcrypt_class, devt);
	if (!dev)
		return NULL;

	return to_zcdn_dev(dev);
}
156
ioctlmask_show(struct device * dev,struct device_attribute * attr,char * buf)157 static ssize_t ioctlmask_show(struct device *dev,
158 struct device_attribute *attr,
159 char *buf)
160 {
161 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
162 int i, n;
163
164 if (mutex_lock_interruptible(&ap_attr_mutex))
165 return -ERESTARTSYS;
166
167 n = sysfs_emit(buf, "0x");
168 for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
169 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
170 n += sysfs_emit_at(buf, n, "\n");
171
172 mutex_unlock(&ap_attr_mutex);
173
174 return n;
175 }
176
ioctlmask_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)177 static ssize_t ioctlmask_store(struct device *dev,
178 struct device_attribute *attr,
179 const char *buf, size_t count)
180 {
181 int rc;
182 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
183
184 rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
185 AP_IOCTLS, &ap_attr_mutex);
186 if (rc)
187 return rc;
188
189 return count;
190 }
191
192 static DEVICE_ATTR_RW(ioctlmask);
193
apmask_show(struct device * dev,struct device_attribute * attr,char * buf)194 static ssize_t apmask_show(struct device *dev,
195 struct device_attribute *attr,
196 char *buf)
197 {
198 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
199 int i, n;
200
201 if (mutex_lock_interruptible(&ap_attr_mutex))
202 return -ERESTARTSYS;
203
204 n = sysfs_emit(buf, "0x");
205 for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
206 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
207 n += sysfs_emit_at(buf, n, "\n");
208
209 mutex_unlock(&ap_attr_mutex);
210
211 return n;
212 }
213
apmask_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)214 static ssize_t apmask_store(struct device *dev,
215 struct device_attribute *attr,
216 const char *buf, size_t count)
217 {
218 int rc;
219 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
220
221 rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
222 AP_DEVICES, &ap_attr_mutex);
223 if (rc)
224 return rc;
225
226 return count;
227 }
228
229 static DEVICE_ATTR_RW(apmask);
230
aqmask_show(struct device * dev,struct device_attribute * attr,char * buf)231 static ssize_t aqmask_show(struct device *dev,
232 struct device_attribute *attr,
233 char *buf)
234 {
235 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
236 int i, n;
237
238 if (mutex_lock_interruptible(&ap_attr_mutex))
239 return -ERESTARTSYS;
240
241 n = sysfs_emit(buf, "0x");
242 for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
243 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
244 n += sysfs_emit_at(buf, n, "\n");
245
246 mutex_unlock(&ap_attr_mutex);
247
248 return n;
249 }
250
aqmask_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)251 static ssize_t aqmask_store(struct device *dev,
252 struct device_attribute *attr,
253 const char *buf, size_t count)
254 {
255 int rc;
256 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
257
258 rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
259 AP_DOMAINS, &ap_attr_mutex);
260 if (rc)
261 return rc;
262
263 return count;
264 }
265
266 static DEVICE_ATTR_RW(aqmask);
267
admask_show(struct device * dev,struct device_attribute * attr,char * buf)268 static ssize_t admask_show(struct device *dev,
269 struct device_attribute *attr,
270 char *buf)
271 {
272 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
273 int i, n;
274
275 if (mutex_lock_interruptible(&ap_attr_mutex))
276 return -ERESTARTSYS;
277
278 n = sysfs_emit(buf, "0x");
279 for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
280 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
281 n += sysfs_emit_at(buf, n, "\n");
282
283 mutex_unlock(&ap_attr_mutex);
284
285 return n;
286 }
287
admask_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)288 static ssize_t admask_store(struct device *dev,
289 struct device_attribute *attr,
290 const char *buf, size_t count)
291 {
292 int rc;
293 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
294
295 rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
296 AP_DOMAINS, &ap_attr_mutex);
297 if (rc)
298 return rc;
299
300 return count;
301 }
302
303 static DEVICE_ATTR_RW(admask);
304
/* Sysfs attributes exposed on each zcdn device node. */
static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	&dev_attr_admask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

/* Attribute groups hooked into the device via zcdndev->device.groups. */
static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};
321
zcdn_create_store(const struct class * class,const struct class_attribute * attr,const char * buf,size_t count)322 static ssize_t zcdn_create_store(const struct class *class,
323 const struct class_attribute *attr,
324 const char *buf, size_t count)
325 {
326 int rc;
327 char name[ZCDN_MAX_NAME];
328
329 strscpy(name, skip_spaces(buf), sizeof(name));
330
331 rc = zcdn_create(strim(name));
332
333 return rc ? rc : count;
334 }
335
336 static const struct class_attribute class_attr_zcdn_create =
337 __ATTR(create, 0600, NULL, zcdn_create_store);
338
zcdn_destroy_store(const struct class * class,const struct class_attribute * attr,const char * buf,size_t count)339 static ssize_t zcdn_destroy_store(const struct class *class,
340 const struct class_attribute *attr,
341 const char *buf, size_t count)
342 {
343 int rc;
344 char name[ZCDN_MAX_NAME];
345
346 strscpy(name, skip_spaces(buf), sizeof(name));
347
348 rc = zcdn_destroy(strim(name));
349
350 return rc ? rc : count;
351 }
352
353 static const struct class_attribute class_attr_zcdn_destroy =
354 __ATTR(destroy, 0600, NULL, zcdn_destroy_store);
355
/*
 * Release callback of the zcrypt class: invoked by the driver core
 * when the last reference to the embedded struct device is dropped.
 */
static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
			__func__, MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}
365
/*
 * Create a new zcdn device node with the given name. An empty name
 * causes an auto-generated "<ZCRYPT_NAME>_<minor>" name to be used.
 * Returns 0 on success, -EEXIST if the name is already in use,
 * -ENOSPC if all minor numbers are taken, or another negative errno.
 */
static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			/* drop the reference the find function took */
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc_obj(*zcdndev);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = &zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		rc = dev_set_name(&zcdndev->device, "%s", name);
	else
		rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
	if (rc) {
		/* not yet registered, a plain kfree is sufficient here */
		kfree(zcdndev);
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		/* after device_register() failure only put_device() is valid */
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
			__func__, MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_attr_mutex);
	return rc;
}
430
/*
 * Destroy the zcdn device node with the given name. Returns 0 on
 * success or -ENOENT if no such node exists.
 */
static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_attr_mutex);
	return rc;
}
457
/*
 * Unregister all existing zcdn device nodes by walking the whole
 * minor number range and unregistering every node found.
 */
static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_attr_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			/* drop the find reference, then unregister */
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_attr_mutex);
}
475
476 /*
477 * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
478 *
479 * This function is not supported beyond zcrypt 1.3.1.
480 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	/* reading from a zcrypt device node is always rejected */
	return -EPERM;
}
486
487 /*
488 * zcrypt_write(): Not allowed.
489 *
490 * Write is not allowed
491 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	/* writing to a zcrypt device node is always rejected */
	return -EPERM;
}
497
498 /*
499 * zcrypt_open(): Count number of users.
500 *
501 * Device open function to count number of users.
502 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	/* default: the global ap permission masks apply */
	struct ap_perms *perms = &ap_perms;

	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_attr_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_attr_mutex);
		/* a zcdn node carries its own private permission masks */
		if (zcdndev)
			perms = &zcdndev->perms;
	}
	/* the find reference is dropped in zcrypt_release() */
	filp->private_data = (void *)perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}
523
524 /*
525 * zcrypt_release(): Count number of users.
526 *
527 * Device close function to count number of users.
528 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_attr_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_attr_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}

	atomic_dec(&zcrypt_open_count);
	return 0;
}
547
zcrypt_check_ioctl(struct ap_perms * perms,unsigned int cmd)548 static inline int zcrypt_check_ioctl(struct ap_perms *perms,
549 unsigned int cmd)
550 {
551 int rc = -EPERM;
552 int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
553
554 if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
555 if (test_bit_inv(ioctlnr, perms->ioctlm))
556 rc = 0;
557 }
558
559 if (rc)
560 ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
561 __func__, ioctlnr, rc);
562
563 return rc;
564 }
565
zcrypt_check_card(struct ap_perms * perms,int card)566 static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
567 {
568 return test_bit_inv(card, perms->apm) ? true : false;
569 }
570
zcrypt_check_queue(struct ap_perms * perms,int queue)571 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
572 {
573 return test_bit_inv(queue, perms->aqm) ? true : false;
574 }
575
/*
 * Pin the chosen card/queue pair for use: take module, card, queue
 * and device references, add the request weight to the card and
 * queue load and bump the queue request counter. Returns the queue
 * on success or NULL if zq is NULL or the owning driver module is
 * going away. Called under zcrypt_list_lock; the references taken
 * here are dropped again via zcrypt_drop_queue().
 */
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
		return NULL;
	zcrypt_card_get(zc);
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	/* hand the module back to the caller for the later module_put() */
	*pmod = zq->queue->ap_dev.device.driver->owner;
	return zq;
}
592
/*
 * Counterpart of zcrypt_pick_queue(): subtract the request weight
 * from the load values again and drop all references taken there.
 */
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	zcrypt_card_put(zc);
	module_put(mod);
}
606
zcrypt_card_compare(struct zcrypt_card * zc,struct zcrypt_card * pref_zc,unsigned int weight,unsigned int pref_weight)607 static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
608 struct zcrypt_card *pref_zc,
609 unsigned int weight,
610 unsigned int pref_weight)
611 {
612 if (!pref_zc)
613 return true;
614 weight += atomic_read(&zc->load);
615 pref_weight += atomic_read(&pref_zc->load);
616 if (weight == pref_weight)
617 return atomic64_read(&zc->card->total_request_count) <
618 atomic64_read(&pref_zc->card->total_request_count);
619 return weight < pref_weight;
620 }
621
zcrypt_queue_compare(struct zcrypt_queue * zq,struct zcrypt_queue * pref_zq,unsigned int weight,unsigned int pref_weight)622 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
623 struct zcrypt_queue *pref_zq,
624 unsigned int weight,
625 unsigned int pref_weight)
626 {
627 if (!pref_zq)
628 return true;
629 weight += atomic_read(&zq->load);
630 pref_weight += atomic_read(&pref_zq->load);
631 if (weight == pref_weight)
632 return zq->queue->total_request_count <
633 pref_zq->queue->total_request_count;
634 return weight < pref_weight;
635 }
636
637 /*
638 * zcrypt ioctls.
639 */
/*
 * Handle an ICARSAMODEXPO request: pick the best suitable accelerator
 * or CCA card/queue pair (lowest weighted load, honoring the zcdn
 * permission masks and retry penalties) and pass the modexpo request
 * on to the queue's msgtype ops. Returns 0 on success or a negative
 * errno value; the last rc and qid are recorded in tr if given.
 */
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct zcrypt_track *tr,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	/* pin the winner (takes refs) before dropping the list lock */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}
745
/*
 * Handle an ICARSACRT request: pick the best suitable accelerator or
 * CCA card/queue pair (lowest weighted load, honoring the zcdn
 * permission masks and retry penalties) and pass the CRT request on
 * to the queue's msgtype ops. Returns 0 on success or a negative
 * errno value; the last rc and qid are recorded in tr if given.
 */
static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct zcrypt_track *tr,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	/* pin the winner (takes refs) before dropping the list lock */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}
851
/*
 * Handle a CPRB send request: pick the best suitable CCA card/queue
 * pair (lowest weighted load, honoring the target card/domain, the
 * zcdn permission masks and retry penalties) and pass the xcrb on to
 * the queue's msgtype ops. Returns 0 on success or a negative errno
 * value; the last rc and qid are recorded in tr if given.
 *
 * Fix: remember the chosen qid before sending. Previously qid stayed
 * 0, so tr->last_qid and the s390_zcrypt_rep trace always reported
 * queue 0 and the retry card/queue weight penalties (which key on
 * tr->last_qid, see the cpen/qpen computations) never took effect.
 */
static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms,
			      struct zcrypt_track *tr,
			      struct ica_xcRB *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	unsigned int card, domain, func_code = 0;
	unsigned int wgt = 0, pref_wgt = 0;
	struct zcrypt_queue *zq, *pref_zq;
	struct zcrypt_card *zc, *pref_zc;
	int cpen, qpen, qid = 0, rc;
	struct ap_message ap_msg;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);

	xcrb->status = 0;

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	/* check admin/usage admission for zcdn nodes with own perms */
	if (perms != &ap_perms && domain < AP_DOMAINS) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(domain, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, autoselect target domain.
	 */
	if (domain < AP_DOMAINS &&
	    !ap_test_config_usage_domain(domain) &&
	    ap_test_config_ctrl_domain(domain))
		domain = AUTOSEL_DOM;

	pref_zc = NULL;
	pref_zq = NULL;
	card = xcrb->user_defined;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* Check for user selected CCA card */
		if (card != AUTOSELECT && card != zc->card->id)
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check for device usable and eligible */
			if (!zq->online || !zq->ops->send_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (domain != AUTOSEL_DOM &&
			     domain != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	/* pin the winner (takes refs) before dropping the list lock */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no match for address %02x.%04x => ENODEV\n",
			 card, domain);
		rc = -ENODEV;
		goto out;
	}

	/* remember the chosen qid for tracing and retry tracking */
	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}
980
/*
 * Send a CPRB to a CCA card/queue. Retries up to TRACK_AGAIN_MAX
 * times on -EAGAIN and, if no device was found, once more after a
 * successful AP bus rescan. Returns 0 on success or a negative errno.
 */
long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	/* the retry budget is exhausted, report EIO instead of EAGAIN */
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
1005
is_desired_ep11_card(unsigned int dev_id,unsigned short target_num,struct ep11_target_dev * targets)1006 static bool is_desired_ep11_card(unsigned int dev_id,
1007 unsigned short target_num,
1008 struct ep11_target_dev *targets)
1009 {
1010 while (target_num-- > 0) {
1011 if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
1012 return true;
1013 targets++;
1014 }
1015 return false;
1016 }
1017
is_desired_ep11_queue(unsigned int dev_qid,unsigned short target_num,struct ep11_target_dev * targets)1018 static bool is_desired_ep11_queue(unsigned int dev_qid,
1019 unsigned short target_num,
1020 struct ep11_target_dev *targets)
1021 {
1022 int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
1023
1024 while (target_num-- > 0) {
1025 if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
1026 (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
1027 return true;
1028 targets++;
1029 }
1030 return false;
1031 }
1032
/*
 * _zcrypt_send_ep11_cprb() - low level EP11 CPRB send function.
 * @xflags: ZCRYPT_XFLAG_* bits; USERSPACE means xcrb and its embedded
 *	    pointers reference user space memory, NOMEMALLOC requests the
 *	    pre-allocated mempool for the ap message buffers.
 * @perms:  ap/domain admission masks of the issuing device node
 * @tr:     optional retry tracking state (may be NULL); a previous failed
 *	    attempt recorded there adds a weight penalty on the card/queue
 *	    that was used last time, to prefer a different device on retry.
 * @xcrb:   the EP11 urb with request and reply buffer descriptions
 *
 * Selects the lowest-weight online EP11 capable card/queue that matches
 * the (optional) user supplied target list and the node's admission
 * masks, then forwards the CPRB to it. Returns 0 on success or a
 * negative errno (-ENODEV if no usable device was found).
 */
static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms,
				   struct zcrypt_track *tr,
				   struct ep11_urb *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets = NULL;
	unsigned short target_num;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0, domain;
	struct ap_message ap_msg;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	target_num = (unsigned short)xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	rc = -ENOMEM;
	if (target_num != 0) {
		if (userspace) {
			/* copy the target list from user space */
			targets = kzalloc_objs(*targets, target_num);
			if (!targets)
				goto out;
			if (copy_from_user(targets, xcrb->targets,
					   target_num * sizeof(*targets))) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			/* in-kernel caller: use the list in place, no copy */
			targets = (struct ep11_target_dev __force __kernel *)xcrb->targets;
		}
	}

	rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	/*
	 * Admission check for a zcdn device node (perms != default
	 * ap_perms) when a specific target domain is addressed.
	 */
	if (perms != &ap_perms && domain < AUTOSEL_DOM) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(domain, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			/* neither admin nor usage msg - reject */
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	/* walk all cards/queues and pick the best (lowest weight) match */
	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable EP11 card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.ep11)
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->send_ep11_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	/* pin the chosen queue (takes module and queue refcounts) */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		if (targets && target_num == 1) {
			pr_debug("no match for address %02x.%04x => ENODEV\n",
				 (int)targets->ap_id, (int)targets->dom_id);
		} else if (targets) {
			pr_debug("no match for %d target addrs => ENODEV\n",
				 (int)target_num);
		} else {
			pr_debug("no match for address ff.ffff => ENODEV\n");
		}
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	/* drop the refcounts taken by zcrypt_pick_queue() */
	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	/* targets was only allocated for the userspace case */
	if (userspace)
		kfree(targets);
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}
1183
/*
 * Send an EP11 CPRB on behalf of an in-kernel user (default ap_perms,
 * no device node admission checks). Retries on -EAGAIN up to
 * TRACK_AGAIN_MAX times, and tries once more after a requested AP bus
 * rescan if no device was found.
 */
long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	for (;;) {
		rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
		if (rc != -EAGAIN || ++tr.again_counter >= TRACK_AGAIN_MAX)
			break;
	}

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan()) {
		for (;;) {
			rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms,
						    &tr, xcrb);
			if (rc != -EAGAIN ||
			    ++tr.again_counter >= TRACK_AGAIN_MAX)
				break;
		}
	}
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
1208
/*
 * zcrypt_rng() - fetch random data via a CCA crypto card.
 * @buffer: destination buffer for the random bytes
 *
 * Selects the best (lowest weight) online CCA card/queue and runs the
 * hardware rng request on it. Returns the rng ops' result (the number
 * of bytes delivered on success - see the queue ops implementation) or
 * a negative errno, in particular -ENODEV if no usable device exists.
 */
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;
	rc = prep_rng_ap_msg(&ap_msg, &func_code, NULL);
	if (rc)
		goto out;

	/* walk all cards/queues and pick the best match */
	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rng ||
			    !ap_queue_usable(zq->queue))
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	/* pin the chosen queue (takes module and queue refcounts) */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	/* drop the refcounts taken by zcrypt_pick_queue() */
	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}
1275
zcrypt_device_status_mask(struct zcrypt_device_status * devstatus)1276 static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
1277 {
1278 struct zcrypt_card *zc;
1279 struct zcrypt_queue *zq;
1280 struct zcrypt_device_status *stat;
1281 int card, queue;
1282
1283 memset(devstatus, 0, MAX_ZDEV_ENTRIES
1284 * sizeof(struct zcrypt_device_status));
1285
1286 spin_lock(&zcrypt_list_lock);
1287 for_each_zcrypt_card(zc) {
1288 for_each_zcrypt_queue(zq, zc) {
1289 card = AP_QID_CARD(zq->queue->qid);
1290 if (card >= MAX_ZDEV_CARDIDS)
1291 continue;
1292 queue = AP_QID_QUEUE(zq->queue->qid);
1293 stat = &devstatus[card * AP_DOMAINS + queue];
1294 stat->hwtype = zc->card->ap_dev.device_type;
1295 stat->functions = zc->card->hwinfo.fac >> 26;
1296 stat->qid = zq->queue->qid;
1297 stat->online = zq->online ? 0x01 : 0x00;
1298 }
1299 }
1300 spin_unlock(&zcrypt_list_lock);
1301 }
1302
zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext * devstatus,int maxcard,int maxqueue)1303 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
1304 int maxcard, int maxqueue)
1305 {
1306 struct zcrypt_card *zc;
1307 struct zcrypt_queue *zq;
1308 struct zcrypt_device_status_ext *stat;
1309 int card, queue;
1310
1311 maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT);
1312 maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT);
1313
1314 spin_lock(&zcrypt_list_lock);
1315 for_each_zcrypt_card(zc) {
1316 for_each_zcrypt_queue(zq, zc) {
1317 card = AP_QID_CARD(zq->queue->qid);
1318 queue = AP_QID_QUEUE(zq->queue->qid);
1319 if (card >= maxcard || queue >= maxqueue)
1320 continue;
1321 stat = &devstatus[card * maxqueue + queue];
1322 stat->hwtype = zc->card->ap_dev.device_type;
1323 stat->functions = zc->card->hwinfo.fac >> 26;
1324 stat->qid = zq->queue->qid;
1325 stat->online = zq->online ? 0x01 : 0x00;
1326 }
1327 }
1328 spin_unlock(&zcrypt_list_lock);
1329 }
1330 EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
1331
zcrypt_device_status_ext(int card,int queue,struct zcrypt_device_status_ext * devstat)1332 int zcrypt_device_status_ext(int card, int queue,
1333 struct zcrypt_device_status_ext *devstat)
1334 {
1335 struct zcrypt_card *zc;
1336 struct zcrypt_queue *zq;
1337
1338 memset(devstat, 0, sizeof(*devstat));
1339
1340 spin_lock(&zcrypt_list_lock);
1341 for_each_zcrypt_card(zc) {
1342 for_each_zcrypt_queue(zq, zc) {
1343 if (card == AP_QID_CARD(zq->queue->qid) &&
1344 queue == AP_QID_QUEUE(zq->queue->qid)) {
1345 devstat->hwtype = zc->card->ap_dev.device_type;
1346 devstat->functions = zc->card->hwinfo.fac >> 26;
1347 devstat->qid = zq->queue->qid;
1348 devstat->online = zq->online ? 0x01 : 0x00;
1349 spin_unlock(&zcrypt_list_lock);
1350 return 0;
1351 }
1352 }
1353 }
1354 spin_unlock(&zcrypt_list_lock);
1355
1356 return -ENODEV;
1357 }
1358 EXPORT_SYMBOL(zcrypt_device_status_ext);
1359
/*
 * Fill a per-card status byte array for queues within the default
 * ap_domain_index: the card's user space type if online, 0x0d if not.
 * Cards beyond max_adapters are skipped; unused entries stay zero.
 */
static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_queue *zq;
	struct zcrypt_card *zc;
	int card;

	memset(status, 0, max_adapters);

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
1379
/*
 * Fill a per-card queue depth byte array (pending + requestq counts)
 * for queues within the default ap_domain_index. Cards beyond
 * max_adapters are skipped; unused entries stay zero.
 */
static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_queue *zq;
	struct zcrypt_card *zc;
	int card;

	memset(qdepth, 0, max_adapters);

	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= max_adapters)
				continue;
			/* counters are protected by the queue lock */
			spin_lock(&zq->queue->lock);
			qdepth[card] = zq->queue->pendingq_count +
				       zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}
1405
/*
 * Fill a per-card request counter array for queues within the default
 * ap_domain_index. The internal counters are u64; values are saturated
 * at UINT_MAX for the u32 output array. Cards beyond max_adapters are
 * skipped; unused entries stay zero.
 */
static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;
	u64 cnt;

	/* use sizeof(u32) to match the element type of reqcnt[] */
	memset(reqcnt, 0, sizeof(u32) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			/* counter is protected by the queue lock */
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}
1431
zcrypt_pendingq_count(void)1432 static int zcrypt_pendingq_count(void)
1433 {
1434 struct zcrypt_card *zc;
1435 struct zcrypt_queue *zq;
1436 int pendingq_count;
1437
1438 pendingq_count = 0;
1439 spin_lock(&zcrypt_list_lock);
1440 local_bh_disable();
1441 for_each_zcrypt_card(zc) {
1442 for_each_zcrypt_queue(zq, zc) {
1443 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1444 continue;
1445 spin_lock(&zq->queue->lock);
1446 pendingq_count += zq->queue->pendingq_count;
1447 spin_unlock(&zq->queue->lock);
1448 }
1449 }
1450 local_bh_enable();
1451 spin_unlock(&zcrypt_list_lock);
1452 return pendingq_count;
1453 }
1454
zcrypt_requestq_count(void)1455 static int zcrypt_requestq_count(void)
1456 {
1457 struct zcrypt_card *zc;
1458 struct zcrypt_queue *zq;
1459 int requestq_count;
1460
1461 requestq_count = 0;
1462 spin_lock(&zcrypt_list_lock);
1463 local_bh_disable();
1464 for_each_zcrypt_card(zc) {
1465 for_each_zcrypt_queue(zq, zc) {
1466 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1467 continue;
1468 spin_lock(&zq->queue->lock);
1469 requestq_count += zq->queue->requestq_count;
1470 spin_unlock(&zq->queue->lock);
1471 }
1472 }
1473 local_bh_enable();
1474 spin_unlock(&zcrypt_list_lock);
1475 return requestq_count;
1476 }
1477
icarsamodexpo_ioctl(struct ap_perms * perms,unsigned long arg)1478 static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
1479 {
1480 int rc;
1481 struct zcrypt_track tr;
1482 struct ica_rsa_modexpo mex;
1483 struct ica_rsa_modexpo __user *umex = (void __user *)arg;
1484
1485 memset(&tr, 0, sizeof(tr));
1486 if (copy_from_user(&mex, umex, sizeof(mex)))
1487 return -EFAULT;
1488
1489 do {
1490 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1491 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1492
1493 /* on ENODEV failure: retry once again after a requested rescan */
1494 if (rc == -ENODEV && zcrypt_process_rescan())
1495 do {
1496 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1497 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1498 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1499 rc = -EIO;
1500 if (rc) {
1501 pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
1502 return rc;
1503 }
1504 return put_user(mex.outputdatalength, &umex->outputdatalength);
1505 }
1506
icarsacrt_ioctl(struct ap_perms * perms,unsigned long arg)1507 static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
1508 {
1509 int rc;
1510 struct zcrypt_track tr;
1511 struct ica_rsa_modexpo_crt crt;
1512 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;
1513
1514 memset(&tr, 0, sizeof(tr));
1515 if (copy_from_user(&crt, ucrt, sizeof(crt)))
1516 return -EFAULT;
1517
1518 do {
1519 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1520 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1521
1522 /* on ENODEV failure: retry once again after a requested rescan */
1523 if (rc == -ENODEV && zcrypt_process_rescan())
1524 do {
1525 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1526 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1527 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1528 rc = -EIO;
1529 if (rc) {
1530 pr_debug("ioctl ICARSACRT rc=%d\n", rc);
1531 return rc;
1532 }
1533 return put_user(crt.outputdatalength, &ucrt->outputdatalength);
1534 }
1535
zsecsendcprb_ioctl(struct ap_perms * perms,unsigned long arg)1536 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
1537 {
1538 int rc;
1539 struct ica_xcRB xcrb;
1540 struct zcrypt_track tr;
1541 u32 xflags = ZCRYPT_XFLAG_USERSPACE;
1542 struct ica_xcRB __user *uxcrb = (void __user *)arg;
1543
1544 memset(&tr, 0, sizeof(tr));
1545 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1546 return -EFAULT;
1547
1548 do {
1549 rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
1550 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1551
1552 /* on ENODEV failure: retry once again after a requested rescan */
1553 if (rc == -ENODEV && zcrypt_process_rescan())
1554 do {
1555 rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
1556 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1557 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1558 rc = -EIO;
1559 if (rc)
1560 pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
1561 rc, xcrb.status);
1562 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1563 return -EFAULT;
1564 return rc;
1565 }
1566
zsendep11cprb_ioctl(struct ap_perms * perms,unsigned long arg)1567 static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
1568 {
1569 int rc;
1570 struct ep11_urb xcrb;
1571 struct zcrypt_track tr;
1572 u32 xflags = ZCRYPT_XFLAG_USERSPACE;
1573 struct ep11_urb __user *uxcrb = (void __user *)arg;
1574
1575 memset(&tr, 0, sizeof(tr));
1576 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1577 return -EFAULT;
1578
1579 do {
1580 rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
1581 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1582
1583 /* on ENODEV failure: retry once again after a requested rescan */
1584 if (rc == -ENODEV && zcrypt_process_rescan())
1585 do {
1586 rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
1587 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1588 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1589 rc = -EIO;
1590 if (rc)
1591 pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
1592 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1593 return -EFAULT;
1594 return rc;
1595 }
1596
/*
 * zcrypt_unlocked_ioctl() - ioctl entry point for the zcrypt devices.
 * @filp: file pointer; private_data carries the ap_perms admission
 *	  masks of the device node this fd was opened on
 * @cmd:  ioctl command
 * @arg:  ioctl argument, typically a user space pointer
 *
 * Verifies the node's ioctl admission first, then dispatches to the
 * per-command handlers. Returns 0 on success, a negative errno on
 * failure, or -ENOIOCTLCMD for unknown commands.
 */
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	/* check if this ioctl is allowed for this device node */
	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO:
		return icarsamodexpo_ioctl(perms, arg);
	case ICARSACRT:
		return icarsacrt_ioctl(perms, arg);
	case ZSECSENDCPRB:
		return zsecsendcprb_ioctl(perms, arg);
	case ZSENDEP11CPRB:
		return zsendep11cprb_ioctl(perms, arg);
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		/* table is too large for the stack - allocate it */
		device_status = kvzalloc_objs(struct zcrypt_device_status_ext,
					      MAX_ZDEV_ENTRIES_EXT);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status,
					      MAX_ZDEV_CARDIDS_EXT,
					      MAX_ZDEV_DOMAINS_EXT);
		/* rc is 0 here (zcrypt_check_ioctl() succeeded above) */
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kvfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		/* u32 counter array is too large for the stack */
		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *)arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *)arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *)arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *)arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *)arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		pr_debug("unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}
1725
/*
 * Misc device file operations: shared by the z90crypt misc device
 * and the zcdn char devices (see zcdn_init() below).
 */
static const struct file_operations zcrypt_fops = {
	.owner = THIS_MODULE,
	.read = zcrypt_read,
	.write = zcrypt_write,
	.unlocked_ioctl = zcrypt_unlocked_ioctl,
	.open = zcrypt_open,
	.release = zcrypt_release,
};
1737
/*
 * Misc device: the classic /dev/z90crypt node with a dynamic minor.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "z90crypt",
	.fops = &zcrypt_fops,
};
1746
/* hwrng registration state - all of it protected by zcrypt_rng_mutex */
static int zcrypt_rng_device_count;	/* usage refcount of the hwrng dev */
static u32 *zcrypt_rng_buffer;		/* one page of fetched random data */
static int zcrypt_rng_buffer_index;	/* u32 words still unread in buffer */
static DEFINE_MUTEX(zcrypt_rng_mutex);
1751
/*
 * hwrng data_read callback: hand out one u32 from the buffered random
 * data, refilling the buffer via zcrypt_rng() when it runs empty.
 * No locking needed - the RNG API serializes data_read calls.
 */
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	if (!zcrypt_rng_buffer_index) {
		/* buffer empty - refill it from a crypto card */
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	zcrypt_rng_buffer_index--;
	*data = zcrypt_rng_buffer[zcrypt_rng_buffer_index];
	return sizeof(*data);
}
1772
/* hwrng device descriptor registered via hwrng_register() below */
static struct hwrng zcrypt_rng_dev = {
	.name = "zcrypt",
	.data_read = zcrypt_rng_data_read,
	.quality = 990,
};
1778
/*
 * zcrypt_rng_device_add() - register the zcrypt hwrng source.
 *
 * Reference counted: the first caller allocates the page sized random
 * data buffer and registers the hwrng device; subsequent callers only
 * increment the usage count. Returns 0 on success, -ENOMEM if the
 * buffer allocation fails, or the hwrng_register() error.
 */
int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}
1807
zcrypt_rng_device_remove(void)1808 void zcrypt_rng_device_remove(void)
1809 {
1810 mutex_lock(&zcrypt_rng_mutex);
1811 zcrypt_rng_device_count--;
1812 if (zcrypt_rng_device_count == 0) {
1813 hwrng_unregister(&zcrypt_rng_dev);
1814 free_page((unsigned long)zcrypt_rng_buffer);
1815 }
1816 mutex_unlock(&zcrypt_rng_mutex);
1817 }
1818
/*
 * Wait until the zcrypt api is operational.
 * The AP bus scan and the binding of ap devices to device drivers is
 * an asynchronous job. This function waits until these initial jobs
 * are done and so the zcrypt api should be ready to serve crypto
 * requests - if there are resources available. The function uses an
 * internal timeout of 30s. The very first caller will either wait for
 * ap bus bindings complete or the timeout happens. This state will be
 * remembered for further callers which will only be blocked until a
 * decision is made (timeout or bindings complete).
 * On timeout -ETIME is returned, on success the return value is 0.
 */
int zcrypt_wait_api_operational(void)
{
	/* static state shared by all callers, protected by the mutex */
	static DEFINE_MUTEX(zcrypt_wait_api_lock);
	static int zcrypt_wait_api_state;
	int rc;

	rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
	if (rc)
		return rc;

	switch (zcrypt_wait_api_state) {
	case 0:
		/* initial state, invoke wait for the ap bus complete */
		rc = ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
		switch (rc) {
		case 0:
			/* ap bus bindings are complete */
			zcrypt_wait_api_state = 1;
			break;
		case -EINTR:
			/* interrupted, go back to caller */
			break;
		case -ETIME:
			/* timeout */
			ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
					__func__);
			zcrypt_wait_api_state = -ETIME;
			break;
		default:
			/* other failure */
			pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc);
			break;
		}
		break;
	case 1:
		/* a previous caller already found ap bus bindings complete */
		rc = 0;
		break;
	default:
		/* a previous caller had timeout or other failure */
		rc = zcrypt_wait_api_state;
		break;
	}

	mutex_unlock(&zcrypt_wait_api_lock);

	return rc;
}
EXPORT_SYMBOL(zcrypt_wait_api_operational);
1881
/*
 * zcrypt_debug_init() - register the s390 debug feature area for
 * zcrypt (sprintf view, error level by default). Always returns 0.
 */
int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
					 ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}
1891
/* Unregister the zcrypt s390 debug feature area. */
void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}
1896
/*
 * zcdn_init() - set up the zcrypt device node infrastructure:
 * the 'zcrypt' class, the char device minor range and cdev, and
 * the zcdn_create/zcdn_destroy class attribute files. On failure
 * everything already set up is undone in reverse order.
 */
static int __init zcdn_init(void)
{
	int rc;

	/* create a new class 'zcrypt' */
	rc = class_register(&zcrypt_class);
	if (rc)
		goto out_class_register_failed;

	/* alloc device minor range */
	rc = alloc_chrdev_region(&zcrypt_devt,
				 0, ZCRYPT_MAX_MINOR_NODES,
				 ZCRYPT_NAME);
	if (rc)
		goto out_alloc_chrdev_failed;

	cdev_init(&zcrypt_cdev, &zcrypt_fops);
	zcrypt_cdev.owner = THIS_MODULE;
	rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	if (rc)
		goto out_cdev_add_failed;

	/* need some class specific sysfs attributes */
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create);
	if (rc)
		goto out_class_create_file_1_failed;
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy);
	if (rc)
		goto out_class_create_file_2_failed;

	return 0;

out_class_create_file_2_failed:
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
	cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
	class_unregister(&zcrypt_class);
out_class_register_failed:
	return rc;
}
1940
/*
 * zcdn_exit() - tear down the zcrypt device node infrastructure in
 * reverse order of zcdn_init(), destroying all zcdn devices first.
 */
static void zcdn_exit(void)
{
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
	class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy);
	zcdn_destroy_all();
	cdev_del(&zcrypt_cdev);
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	class_unregister(&zcrypt_class);
}
1950
/*
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code. Sets up, in order: the debug
 * feature, the zcdn device node infrastructure, the CCA and EP11
 * misc helpers (mempools), the z90crypt misc device, and finally
 * the message type handlers. On any failure everything already
 * initialized is torn down in reverse order.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	/* make sure the mempool threshold is >= 1 */
	if (zcrypt_mempool_threshold < 1) {
		rc = -EINVAL;
		goto out;
	}

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	rc = zcdn_init();
	if (rc)
		goto out_zcdn_init_failed;

	rc = zcrypt_ccamisc_init();
	if (rc)
		goto out_ccamisc_init_failed;

	rc = zcrypt_ep11misc_init();
	if (rc)
		goto out_ep11misc_init_failed;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_misc_register_failed;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();

	return 0;

out_misc_register_failed:
	zcrypt_ep11misc_exit();
out_ep11misc_init_failed:
	zcrypt_ccamisc_exit();
out_ccamisc_init_failed:
	zcdn_exit();
out_zcdn_init_failed:
	zcrypt_debug_exit();
out:
	return rc;
}
2003
/*
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code. Unregisters all pieces set up by
 * zcrypt_api_init().
 */
void __exit zcrypt_api_exit(void)
{
	zcdn_exit();
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_ccamisc_exit();
	zcrypt_ep11misc_exit();
	zcrypt_debug_exit();
}
2019
2020 module_init(zcrypt_api_init);
2021 module_exit(zcrypt_api_exit);
2022