// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright IBM Corp. 2001, 2018
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
 */

#define pr_fmt(fmt) "zcrypt: " fmt

#include <linux/export.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

unsigned int zcrypt_mempool_threshold = 5;
module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0400);
MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
/*
 * Process a rescan of the transport layer.
 * Runs a synchronous AP bus rescan.
 * Returns true if something has changed (for example the bus scan
 * has found and built up new devices), in which case a retry is
 * worthwhile. Otherwise false is returned, meaning no changes on
 * the AP bus level.
 */
static inline bool zcrypt_process_rescan(void)
{
	return ap_bus_force_rescan();
}

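/*
 * Register resp. unregister the message type ops of a zcrypt msgtype
 * module (see the msgtype6 and msgtype50 implementations) with the
 * zcrypt api by adding/removing them on the zcrypt_ops_list.
 */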
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if (zops->variant == variant &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);

/*
 * Multi device nodes extension functions.
 */

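/*
 * Additional zcrypt device nodes (zcdn) can be created and destroyed
 * at runtime via the 'create' and 'destroy' class attributes
 * registered below. Illustrative sketch, assuming the class shows up
 * under /sys/class/zcrypt:
 *
 *   echo "my_node" > /sys/class/zcrypt/create
 *   echo "my_node" > /sys/class/zcrypt/destroy
 *
 * Each node carries its own ap/aq/ad/ioctl permission masks which are
 * exposed as sysfs attributes of the node's device.
 */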
struct zcdn_device;

static void zcdn_device_release(struct device *dev);
static const struct class zcrypt_class = {
	.name = ZCRYPT_NAME,
	.dev_release = zcdn_device_release,
};
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

#define ZCDN_MAX_NAME 32

static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);

/*
 * Find zcdn device by name.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
	struct device *dev = class_find_device_by_name(&zcrypt_class, name);

	return dev ? to_zcdn_dev(dev) : NULL;
}

/*
 * Find zcdn device by devt value.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);

	return dev ? to_zcdn_dev(dev) : NULL;
}

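/*
 * The ioctlmask/apmask/aqmask/admask attributes below show the node's
 * permission bitmasks as one hex string ("0x...") and accept the mask
 * string formats understood by ap_parse_mask_str() on write. All
 * accesses are serialized via ap_attr_mutex.
 */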
static ssize_t ioctlmask_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_attr_mutex);

	return n;
}

static ssize_t ioctlmask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
			       AP_IOCTLS, &ap_attr_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(ioctlmask);

static ssize_t apmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_attr_mutex);

	return n;
}

static ssize_t apmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
			       AP_DEVICES, &ap_attr_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(apmask);

static ssize_t aqmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_attr_mutex);

	return n;
}

static ssize_t aqmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
			       AP_DOMAINS, &ap_attr_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(aqmask);

static ssize_t admask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_attr_mutex);

	return n;
}

static ssize_t admask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
			       AP_DOMAINS, &ap_attr_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(admask);

static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	&dev_attr_admask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};

static ssize_t zcdn_create_store(const struct class *class,
				 const struct class_attribute *attr,
				 const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_create(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_create =
	__ATTR(create, 0600, NULL, zcdn_create_store);

static ssize_t zcdn_destroy_store(const struct class *class,
				  const struct class_attribute *attr,
				  const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_destroy(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_destroy =
	__ATTR(destroy, 0600, NULL, zcdn_destroy_store);

static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
			__func__, MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}

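/*
 * Create a new zcdn device: check that the given name is not yet in
 * use (an empty name means auto-generate one), find a free minor
 * number within the allocated chrdev region and register the device.
 * Called from the 'create' class attribute store function.
 */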
static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc_obj(*zcdndev, GFP_KERNEL);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = &zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		rc = dev_set_name(&zcdndev->device, "%s", name);
	else
		rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
	if (rc) {
		kfree(zcdndev);
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
			__func__, MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_attr_mutex);
	return rc;
}

static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_attr_mutex);
	return rc;
}

static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_attr_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_attr_mutex);
}

/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_write(): Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	struct ap_perms *perms = &ap_perms;

	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_attr_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_attr_mutex);
		if (zcdndev)
			perms = &zcdndev->perms;
	}
	filp->private_data = (void *)perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}

/*
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_attr_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_attr_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}

	atomic_dec(&zcrypt_open_count);
	return 0;
}

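/*
 * Check if the given ioctl number is allowed by the ioctl permission
 * mask of the (device node's) ap_perms struct. Returns 0 if allowed,
 * -EPERM otherwise.
 */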
static inline int zcrypt_check_ioctl(struct ap_perms *perms,
				     unsigned int cmd)
{
	int rc = -EPERM;
	int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;

	if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
		if (test_bit_inv(ioctlnr, perms->ioctlm))
			rc = 0;
	}

	if (rc)
		ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
				__func__, ioctlnr, rc);

	return rc;
}

static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
	return test_bit_inv(card, perms->apm) ? true : false;
}

static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
	return test_bit_inv(queue, perms->aqm) ? true : false;
}

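/*
 * Take all references needed to use the given queue for one request
 * (module, card, queue and device refcounts) and account the request
 * weight on the card's and queue's load counters. zcrypt_drop_queue()
 * is the exact counterpart and releases everything again.
 */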
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
		return NULL;
	zcrypt_card_get(zc);
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	*pmod = zq->queue->ap_dev.device.driver->owner;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	zcrypt_card_put(zc);
	module_put(mod);
}

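/*
 * Compare a candidate card resp. queue against the currently
 * preferred one. Returns true if the candidate (its weight plus
 * current load) is the better choice; on equal weights the device
 * with the lower total request count wins.
 */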
static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return true;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic64_read(&zc->card->total_request_count) <
			atomic64_read(&pref_zc->card->total_request_count);
	return weight < pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return true;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count <
			pref_zq->queue->total_request_count;
	return weight < pref_weight;
}

/*
 * zcrypt ioctls.
 */
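/*
 * The request functions below all follow the same pattern: walk all
 * known cards and queues, skip devices which are offline, not
 * configured, in checkstop or not allowed by the node's permission
 * masks, weight the remaining candidates (with a penalty if a retried
 * request would hit the very same card/queue again) and finally
 * dispatch the request to the best queue found.
 */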
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct zcrypt_track *tr,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct zcrypt_track *tr,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms,
			      struct zcrypt_track *tr,
			      struct ica_xcRB *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	unsigned short *domain, tdom;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);

	xcrb->status = 0;

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	tdom = *domain;
	if (perms != &ap_perms && tdom < AP_DOMAINS) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(tdom, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, autoselect target domain.
	 */
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom))
		tdom = AUTOSEL_DOM;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* Check for user selected CCA card */
		if (xcrb->user_defined != AUTOSELECT &&
		    xcrb->user_defined != zc->card->id)
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check for device usable and eligible */
			if (!zq->online || !zq->ops->send_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (tdom != AUTOSEL_DOM &&
			     tdom != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no match for address %02x.%04x => ENODEV\n",
			 xcrb->user_defined, *domain);
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == AUTOSEL_DOM)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

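/*
 * In-kernel API to send a CPRB to a CCA card. Retries the request up
 * to TRACK_AGAIN_MAX times on EAGAIN and, after an ENODEV, once more
 * if a forced AP bus rescan found changes on the bus.
 */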
long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);

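/*
 * Check whether the given card id resp. queue id matches one of the
 * EP11 targets in the target list given with an EP11 request. An
 * AUTOSEL_AP/AUTOSEL_DOM entry in the list matches any card/domain.
 */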
static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);

	while (target_num-- > 0) {
		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
			return true;
		targets++;
	}
	return false;
}

static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms,
				   struct zcrypt_track *tr,
				   struct ep11_urb *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets = NULL;
	unsigned short target_num;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0, domain;
	struct ap_message ap_msg;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	target_num = (unsigned short)xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	rc = -ENOMEM;
	if (target_num != 0) {
		if (userspace) {
			targets = kzalloc_objs(*targets, target_num, GFP_KERNEL);
			if (!targets)
				goto out;
			if (copy_from_user(targets, xcrb->targets,
					   target_num * sizeof(*targets))) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			targets = (struct ep11_target_dev __force __kernel *)xcrb->targets;
		}
	}

	rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	if (perms != &ap_perms && domain < AUTOSEL_DOM) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(domain, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable EP11 card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.ep11)
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->send_ep11_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		if (targets && target_num == 1) {
			pr_debug("no match for address %02x.%04x => ENODEV\n",
				 (int)targets->ap_id, (int)targets->dom_id);
		} else if (targets) {
			pr_debug("no match for %d target addrs => ENODEV\n",
				 (int)target_num);
		} else {
			pr_debug("no match for address ff.ffff => ENODEV\n");
		}
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	if (userspace)
		kfree(targets);
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);

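/*
 * Fetch random bytes from a CCA card into the given buffer. Returns
 * the number of random bytes fetched or a negative errno. Used as
 * backend for the zcrypt hwrng device registered further down.
 */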
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;
	rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rng ||
			    !ap_queue_usable(zq->queue))
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

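/*
 * Fill the given array with status info for all known zcrypt queues,
 * indexed by card * maxqueue + queue. The non-ext variant above is
 * the legacy version limited to MAX_ZDEV_CARDIDS adapters.
 */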
void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
				   int maxcard, int maxqueue)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT);
	maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT);

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			if (card >= maxcard || queue >= maxqueue)
				continue;
			stat = &devstatus[card * maxqueue + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);

int zcrypt_device_status_ext(int card, int queue,
			     struct zcrypt_device_status_ext *devstat)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;

	memset(devstat, 0, sizeof(*devstat));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (card == AP_QID_CARD(zq->queue->qid) &&
			    queue == AP_QID_QUEUE(zq->queue->qid)) {
				devstat->hwtype = zc->card->ap_dev.device_type;
				devstat->functions = zc->card->hwinfo.fac >> 26;
				devstat->qid = zq->queue->qid;
				devstat->online = zq->online ? 0x01 : 0x00;
				spin_unlock(&zcrypt_list_lock);
				return 0;
			}
		}
	}
	spin_unlock(&zcrypt_list_lock);

	return -ENODEV;
}
EXPORT_SYMBOL(zcrypt_device_status_ext);

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;
	u64 cnt;

	memset(reqcnt, 0, sizeof(u32) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}

static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo mex;
	struct ica_rsa_modexpo __user *umex = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex, umex, sizeof(mex)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
		return rc;
	}
	return put_user(mex.outputdatalength, &umex->outputdatalength);
}

static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo_crt crt;
	struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt, ucrt, sizeof(crt)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSACRT rc=%d\n", rc);
		return rc;
	}
	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
}

static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ica_xcRB xcrb;
	struct zcrypt_track tr;
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct ica_xcRB __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
			 rc, xcrb.status);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ep11_urb xcrb;
	struct zcrypt_track tr;
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct ep11_urb __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

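/*
 * Main ioctl dispatcher for the z90crypt misc device and the zcdn
 * nodes. Illustrative userspace sketch (assuming the uapi definitions
 * from asm/zcrypt.h):
 *
 *   int fd = open("/dev/z90crypt", O_RDWR);
 *   struct ica_rsa_modexpo mex = { ... };
 *   if (ioctl(fd, ICARSAMODEXPO, &mex) == 0)
 *	   ... mex.outputdata holds the result ...
 */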
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO:
		return icarsamodexpo_ioctl(perms, arg);
	case ICARSACRT:
		return icarsacrt_ioctl(perms, arg);
	case ZSECSENDCPRB:
		return zsecsendcprb_ioctl(perms, arg);
	case ZSENDEP11CPRB:
		return zsendep11cprb_ioctl(perms, arg);
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kvzalloc_objs(struct zcrypt_device_status_ext,
					      MAX_ZDEV_ENTRIES_EXT, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status,
					      MAX_ZDEV_CARDIDS_EXT,
					      MAX_ZDEV_DOMAINS_EXT);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kvfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *)arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *)arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *)arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *)arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *)arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		pr_debug("unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
	.open		= zcrypt_open,
	.release	= zcrypt_release,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

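/*
 * hwrng data_read callback: refill the page sized buffer from a CCA
 * card when it runs empty and hand out one 32 bit word per call.
 */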
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long)zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

/*
 * Wait until the zcrypt api is operational.
 * The AP bus scan and the binding of ap devices to device drivers are
 * asynchronous jobs. This function waits until these initial jobs are
 * done and so the zcrypt api should be ready to serve crypto
 * requests - if there are resources available. The function uses an
 * internal timeout of 30s. The very first caller will either wait
 * until the ap bus bindings are complete or until the timeout happens.
 * This state is remembered for further callers, which are only
 * blocked until a decision is made (timeout or bindings complete).
 * On timeout -ETIME is returned, on success the return value is 0.
 */
int zcrypt_wait_api_operational(void)
{
	static DEFINE_MUTEX(zcrypt_wait_api_lock);
	static int zcrypt_wait_api_state;
	int rc;

	rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
	if (rc)
		return rc;

	switch (zcrypt_wait_api_state) {
	case 0:
		/* initial state, invoke wait for the ap bus complete */
		rc = ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
		switch (rc) {
		case 0:
			/* ap bus bindings are complete */
			zcrypt_wait_api_state = 1;
			break;
		case -EINTR:
			/* interrupted, go back to caller */
			break;
		case -ETIME:
			/* timeout */
			ZCRYPT_DBF_WARN("%s ap_wait_apqn_bindings_complete()=ETIME\n",
					__func__);
			zcrypt_wait_api_state = -ETIME;
			break;
		default:
			/* other failure */
			pr_debug("ap_wait_apqn_bindings_complete()=%d\n", rc);
			break;
		}
		break;
	case 1:
		/* a previous caller already found ap bus bindings complete */
		rc = 0;
		break;
	default:
		/* a previous caller had timeout or other failure */
		rc = zcrypt_wait_api_state;
		break;
	}

	mutex_unlock(&zcrypt_wait_api_lock);

	return rc;
}
EXPORT_SYMBOL(zcrypt_wait_api_operational);

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
					 ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

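/*
 * Set up the zcdn infrastructure: register the zcrypt class, allocate
 * a ZCRYPT_MAX_MINOR_NODES sized chrdev minor range, add the cdev and
 * create the 'create'/'destroy' class attributes. On failure all
 * previously completed steps are rolled back.
 */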
static int __init zcdn_init(void)
{
	int rc;

	/* create a new class 'zcrypt' */
	rc = class_register(&zcrypt_class);
	if (rc)
		goto out_class_register_failed;

	/* alloc device minor range */
	rc = alloc_chrdev_region(&zcrypt_devt,
				 0, ZCRYPT_MAX_MINOR_NODES,
				 ZCRYPT_NAME);
	if (rc)
		goto out_alloc_chrdev_failed;

	cdev_init(&zcrypt_cdev, &zcrypt_fops);
	zcrypt_cdev.owner = THIS_MODULE;
	rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	if (rc)
		goto out_cdev_add_failed;

	/* need some class specific sysfs attributes */
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create);
	if (rc)
		goto out_class_create_file_1_failed;
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy);
	if (rc)
		goto out_class_create_file_2_failed;

	return 0;

out_class_create_file_2_failed:
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
	cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
	class_unregister(&zcrypt_class);
out_class_register_failed:
	return rc;
}

static void zcdn_exit(void)
{
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
	class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy);
	zcdn_destroy_all();
	cdev_del(&zcrypt_cdev);
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	class_unregister(&zcrypt_class);
}

/*
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	/* make sure the mempool threshold is >= 1 */
	if (zcrypt_mempool_threshold < 1) {
		rc = -EINVAL;
		goto out;
	}

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	rc = zcdn_init();
	if (rc)
		goto out_zcdn_init_failed;

	rc = zcrypt_ccamisc_init();
	if (rc)
		goto out_ccamisc_init_failed;

	rc = zcrypt_ep11misc_init();
	if (rc)
		goto out_ep11misc_init_failed;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_misc_register_failed;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();

	return 0;

out_misc_register_failed:
	zcrypt_ep11misc_exit();
out_ep11misc_init_failed:
	zcrypt_ccamisc_exit();
out_ccamisc_init_failed:
	zcdn_exit();
out_zcdn_init_failed:
	zcrypt_debug_exit();
out:
	return rc;
}

/*
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	zcdn_exit();
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_ccamisc_exit();
	zcrypt_ep11misc_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);