1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright IBM Corp. 2001, 2018
4 * Author(s): Robert Burroughs
5 * Eric Rossman (edrossma@us.ibm.com)
6 * Cornelia Huck <cornelia.huck@de.ibm.com>
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
13 */
14
15 #define KMSG_COMPONENT "zcrypt"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18 #include <linux/export.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/miscdevice.h>
23 #include <linux/fs.h>
24 #include <linux/compat.h>
25 #include <linux/slab.h>
26 #include <linux/atomic.h>
27 #include <linux/uaccess.h>
28 #include <linux/hw_random.h>
29 #include <linux/debugfs.h>
30 #include <linux/cdev.h>
31 #include <linux/ctype.h>
32 #include <linux/capability.h>
33 #include <asm/debug.h>
34
35 #define CREATE_TRACE_POINTS
36 #include <asm/trace/zcrypt.h>
37
38 #include "zcrypt_api.h"
39 #include "zcrypt_debug.h"
40
41 #include "zcrypt_msgtype6.h"
42 #include "zcrypt_msgtype50.h"
43 #include "zcrypt_ccamisc.h"
44 #include "zcrypt_ep11misc.h"
45
46 /*
47 * Module description.
48 */
49 MODULE_AUTHOR("IBM Corporation");
50 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
51 "Copyright IBM Corp. 2001, 2012");
52 MODULE_LICENSE("GPL");
53
54 unsigned int zcrypt_mempool_threshold = 5;
55 module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440);
56 MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)");
57
58 /*
59 * zcrypt tracepoint functions
60 */
61 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
62 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
63
64 DEFINE_SPINLOCK(zcrypt_list_lock);
65 LIST_HEAD(zcrypt_card_list);
66
67 static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
68
69 static LIST_HEAD(zcrypt_ops_list);
70
71 /* Zcrypt related debug feature stuff. */
72 debug_info_t *zcrypt_dbf_info;
73
74 /*
75 * Process a rescan of the transport layer.
76 * Runs a synchronous AP bus rescan.
77 * Returns true if something has changed (for example the
78 * bus scan has found and built up new devices), in which
79 * case a retry is worthwhile. Otherwise false is returned,
80 * meaning no changes happened on the AP bus level.
81 */
82 static inline bool zcrypt_process_rescan(void)
83 {
84 return ap_bus_force_rescan();
85 }
86
87 void zcrypt_msgtype_register(struct zcrypt_ops *zops)
88 {
89 list_add_tail(&zops->list, &zcrypt_ops_list);
90 }
91
92 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
93 {
94 list_del_init(&zops->list);
95 }
96
97 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
98 {
99 struct zcrypt_ops *zops;
100
101 list_for_each_entry(zops, &zcrypt_ops_list, list)
102 if (zops->variant == variant &&
103 (!strncmp(zops->name, name, sizeof(zops->name))))
104 return zops;
105 return NULL;
106 }
107 EXPORT_SYMBOL(zcrypt_msgtype);
108
109 /*
110 * Multi device nodes extension functions.
111 */
112
113 struct zcdn_device;
114
115 static void zcdn_device_release(struct device *dev);
116 static const struct class zcrypt_class = {
117 .name = ZCRYPT_NAME,
118 .dev_release = zcdn_device_release,
119 };
120 static dev_t zcrypt_devt;
121 static struct cdev zcrypt_cdev;
122
123 struct zcdn_device {
124 struct device device;
125 struct ap_perms perms;
126 };
127
128 #define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)
129
130 #define ZCDN_MAX_NAME 32
131
132 static int zcdn_create(const char *name);
133 static int zcdn_destroy(const char *name);
134
135 /*
136 * Find zcdn device by name.
137 * Returns reference to the zcdn device which needs to be released
138 * with put_device() after use.
139 */
140 static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
141 {
142 struct device *dev = class_find_device_by_name(&zcrypt_class, name);
143
144 return dev ? to_zcdn_dev(dev) : NULL;
145 }
146
147 /*
148 * Find zcdn device by devt value.
149 * Returns reference to the zcdn device which needs to be released
150 * with put_device() after use.
151 */
152 static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
153 {
154 struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);
155
156 return dev ? to_zcdn_dev(dev) : NULL;
157 }
158
159 static ssize_t ioctlmask_show(struct device *dev,
160 struct device_attribute *attr,
161 char *buf)
162 {
163 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
164 int i, n;
165
166 if (mutex_lock_interruptible(&ap_perms_mutex))
167 return -ERESTARTSYS;
168
169 n = sysfs_emit(buf, "0x");
170 for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
171 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
172 n += sysfs_emit_at(buf, n, "\n");
173
174 mutex_unlock(&ap_perms_mutex);
175
176 return n;
177 }
178
179 static ssize_t ioctlmask_store(struct device *dev,
180 struct device_attribute *attr,
181 const char *buf, size_t count)
182 {
183 int rc;
184 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
185
186 rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
187 AP_IOCTLS, &ap_perms_mutex);
188 if (rc)
189 return rc;
190
191 return count;
192 }
193
194 static DEVICE_ATTR_RW(ioctlmask);
195
196 static ssize_t apmask_show(struct device *dev,
197 struct device_attribute *attr,
198 char *buf)
199 {
200 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
201 int i, n;
202
203 if (mutex_lock_interruptible(&ap_perms_mutex))
204 return -ERESTARTSYS;
205
206 n = sysfs_emit(buf, "0x");
207 for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
208 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
209 n += sysfs_emit_at(buf, n, "\n");
210
211 mutex_unlock(&ap_perms_mutex);
212
213 return n;
214 }
215
216 static ssize_t apmask_store(struct device *dev,
217 struct device_attribute *attr,
218 const char *buf, size_t count)
219 {
220 int rc;
221 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
222
223 rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
224 AP_DEVICES, &ap_perms_mutex);
225 if (rc)
226 return rc;
227
228 return count;
229 }
230
231 static DEVICE_ATTR_RW(apmask);
232
233 static ssize_t aqmask_show(struct device *dev,
234 struct device_attribute *attr,
235 char *buf)
236 {
237 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
238 int i, n;
239
240 if (mutex_lock_interruptible(&ap_perms_mutex))
241 return -ERESTARTSYS;
242
243 n = sysfs_emit(buf, "0x");
244 for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
245 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
246 n += sysfs_emit_at(buf, n, "\n");
247
248 mutex_unlock(&ap_perms_mutex);
249
250 return n;
251 }
252
253 static ssize_t aqmask_store(struct device *dev,
254 struct device_attribute *attr,
255 const char *buf, size_t count)
256 {
257 int rc;
258 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
259
260 rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
261 AP_DOMAINS, &ap_perms_mutex);
262 if (rc)
263 return rc;
264
265 return count;
266 }
267
268 static DEVICE_ATTR_RW(aqmask);
269
270 static ssize_t admask_show(struct device *dev,
271 struct device_attribute *attr,
272 char *buf)
273 {
274 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
275 int i, n;
276
277 if (mutex_lock_interruptible(&ap_perms_mutex))
278 return -ERESTARTSYS;
279
280 n = sysfs_emit(buf, "0x");
281 for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
282 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
283 n += sysfs_emit_at(buf, n, "\n");
284
285 mutex_unlock(&ap_perms_mutex);
286
287 return n;
288 }
289
290 static ssize_t admask_store(struct device *dev,
291 struct device_attribute *attr,
292 const char *buf, size_t count)
293 {
294 int rc;
295 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
296
297 rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
298 AP_DOMAINS, &ap_perms_mutex);
299 if (rc)
300 return rc;
301
302 return count;
303 }
304
305 static DEVICE_ATTR_RW(admask);
306
307 static struct attribute *zcdn_dev_attrs[] = {
308 &dev_attr_ioctlmask.attr,
309 &dev_attr_apmask.attr,
310 &dev_attr_aqmask.attr,
311 &dev_attr_admask.attr,
312 NULL
313 };
314
315 static struct attribute_group zcdn_dev_attr_group = {
316 .attrs = zcdn_dev_attrs
317 };
318
319 static const struct attribute_group *zcdn_dev_attr_groups[] = {
320 &zcdn_dev_attr_group,
321 NULL
322 };
323
324 static ssize_t zcdn_create_store(const struct class *class,
325 const struct class_attribute *attr,
326 const char *buf, size_t count)
327 {
328 int rc;
329 char name[ZCDN_MAX_NAME];
330
331 strscpy(name, skip_spaces(buf), sizeof(name));
332
333 rc = zcdn_create(strim(name));
334
335 return rc ? rc : count;
336 }
337
338 static const struct class_attribute class_attr_zcdn_create =
339 __ATTR(create, 0600, NULL, zcdn_create_store);
340
341 static ssize_t zcdn_destroy_store(const struct class *class,
342 const struct class_attribute *attr,
343 const char *buf, size_t count)
344 {
345 int rc;
346 char name[ZCDN_MAX_NAME];
347
348 strscpy(name, skip_spaces(buf), sizeof(name));
349
350 rc = zcdn_destroy(strim(name));
351
352 return rc ? rc : count;
353 }
354
355 static const struct class_attribute class_attr_zcdn_destroy =
356 __ATTR(destroy, 0600, NULL, zcdn_destroy_store);
357
358 static void zcdn_device_release(struct device *dev)
359 {
360 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
361
362 ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
363 __func__, MAJOR(dev->devt), MINOR(dev->devt));
364
365 kfree(zcdndev);
366 }
367
368 static int zcdn_create(const char *name)
369 {
370 dev_t devt;
371 int i, rc = 0;
372 struct zcdn_device *zcdndev;
373
374 if (mutex_lock_interruptible(&ap_perms_mutex))
375 return -ERESTARTSYS;
376
377 /* check if device node with this name already exists */
378 if (name[0]) {
379 zcdndev = find_zcdndev_by_name(name);
380 if (zcdndev) {
381 put_device(&zcdndev->device);
382 rc = -EEXIST;
383 goto unlockout;
384 }
385 }
386
387 /* find an unused minor number */
388 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
389 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
390 zcdndev = find_zcdndev_by_devt(devt);
391 if (zcdndev)
392 put_device(&zcdndev->device);
393 else
394 break;
395 }
396 if (i == ZCRYPT_MAX_MINOR_NODES) {
397 rc = -ENOSPC;
398 goto unlockout;
399 }
400
401 /* alloc and prepare a new zcdn device */
402 zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
403 if (!zcdndev) {
404 rc = -ENOMEM;
405 goto unlockout;
406 }
407 zcdndev->device.release = zcdn_device_release;
408 zcdndev->device.class = &zcrypt_class;
409 zcdndev->device.devt = devt;
410 zcdndev->device.groups = zcdn_dev_attr_groups;
411 if (name[0])
412 rc = dev_set_name(&zcdndev->device, "%s", name);
413 else
414 rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
415 if (rc) {
416 kfree(zcdndev);
417 goto unlockout;
418 }
419 rc = device_register(&zcdndev->device);
420 if (rc) {
421 put_device(&zcdndev->device);
422 goto unlockout;
423 }
424
425 ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
426 __func__, MAJOR(devt), MINOR(devt));
427
428 unlockout:
429 mutex_unlock(&ap_perms_mutex);
430 return rc;
431 }
432
433 static int zcdn_destroy(const char *name)
434 {
435 int rc = 0;
436 struct zcdn_device *zcdndev;
437
438 if (mutex_lock_interruptible(&ap_perms_mutex))
439 return -ERESTARTSYS;
440
441 /* try to find this zcdn device */
442 zcdndev = find_zcdndev_by_name(name);
443 if (!zcdndev) {
444 rc = -ENOENT;
445 goto unlockout;
446 }
447
448 /*
449 * The zcdn device is not hard destroyed. It is subject to
450 * reference counting and thus just needs to be unregistered.
451 */
452 put_device(&zcdndev->device);
453 device_unregister(&zcdndev->device);
454
455 unlockout:
456 mutex_unlock(&ap_perms_mutex);
457 return rc;
458 }
459
460 static void zcdn_destroy_all(void)
461 {
462 int i;
463 dev_t devt;
464 struct zcdn_device *zcdndev;
465
466 mutex_lock(&ap_perms_mutex);
467 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
468 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
469 zcdndev = find_zcdndev_by_devt(devt);
470 if (zcdndev) {
471 put_device(&zcdndev->device);
472 device_unregister(&zcdndev->device);
473 }
474 }
475 mutex_unlock(&ap_perms_mutex);
476 }
477
478 /*
479 * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
480 *
481 * This function is not supported beyond zcrypt 1.3.1.
482 */
483 static ssize_t zcrypt_read(struct file *filp, char __user *buf,
484 size_t count, loff_t *f_pos)
485 {
486 return -EPERM;
487 }
488
489 /*
490 * zcrypt_write(): Not allowed.
491 *
492 * Write is not allowed
493 */
494 static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
495 size_t count, loff_t *f_pos)
496 {
497 return -EPERM;
498 }
499
500 /*
501 * zcrypt_open(): Count number of users.
502 *
503 * Device open function to count number of users.
504 */
505 static int zcrypt_open(struct inode *inode, struct file *filp)
506 {
507 struct ap_perms *perms = &ap_perms;
508
509 if (filp->f_inode->i_cdev == &zcrypt_cdev) {
510 struct zcdn_device *zcdndev;
511
512 if (mutex_lock_interruptible(&ap_perms_mutex))
513 return -ERESTARTSYS;
514 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
515 /* find returns a reference, no get_device() needed */
516 mutex_unlock(&ap_perms_mutex);
517 if (zcdndev)
518 perms = &zcdndev->perms;
519 }
520 filp->private_data = (void *)perms;
521
522 atomic_inc(&zcrypt_open_count);
523 return stream_open(inode, filp);
524 }
525
526 /*
527 * zcrypt_release(): Count number of users.
528 *
529 * Device close function to count number of users.
530 */
531 static int zcrypt_release(struct inode *inode, struct file *filp)
532 {
533 if (filp->f_inode->i_cdev == &zcrypt_cdev) {
534 struct zcdn_device *zcdndev;
535
536 mutex_lock(&ap_perms_mutex);
537 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
538 mutex_unlock(&ap_perms_mutex);
539 if (zcdndev) {
540 /* 2 puts here: one for find, one for open */
541 put_device(&zcdndev->device);
542 put_device(&zcdndev->device);
543 }
544 }
545
546 atomic_dec(&zcrypt_open_count);
547 return 0;
548 }
549
550 static inline int zcrypt_check_ioctl(struct ap_perms *perms,
551 unsigned int cmd)
552 {
553 int rc = -EPERM;
554 int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
555
556 if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
557 if (test_bit_inv(ioctlnr, perms->ioctlm))
558 rc = 0;
559 }
560
561 if (rc)
562 ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
563 __func__, ioctlnr, rc);
564
565 return rc;
566 }
567
568 static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
569 {
570 return test_bit_inv(card, perms->apm) ? true : false;
571 }
572
573 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
574 {
575 return test_bit_inv(queue, perms->aqm) ? true : false;
576 }
577
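/*
 * Lock in the chosen card/queue pair for one request: take references
 * on the queue driver module, the card, the queue and the AP device,
 * add the request weight to the load counters and bump the queue's
 * request counter. Returns the queue or NULL if it is not usable.
 */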
578 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
579 struct zcrypt_queue *zq,
580 struct module **pmod,
581 unsigned int weight)
582 {
583 if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
584 return NULL;
585 zcrypt_card_get(zc);
586 zcrypt_queue_get(zq);
587 get_device(&zq->queue->ap_dev.device);
588 atomic_add(weight, &zc->load);
589 atomic_add(weight, &zq->load);
590 zq->request_count++;
591 *pmod = zq->queue->ap_dev.device.driver->owner;
592 return zq;
593 }
594
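/*
 * Counterpart to zcrypt_pick_queue(): subtract the request weight from
 * the load counters again and drop all references taken at pick time.
 */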
595 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
596 struct zcrypt_queue *zq,
597 struct module *mod,
598 unsigned int weight)
599 {
600 zq->request_count--;
601 atomic_sub(weight, &zc->load);
602 atomic_sub(weight, &zq->load);
603 put_device(&zq->queue->ap_dev.device);
604 zcrypt_queue_put(zq);
605 zcrypt_card_put(zc);
606 module_put(mod);
607 }
608
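/*
 * Returns true if candidate card zc (at the given request weight) should
 * be preferred over the current preference pref_zc: the lower resulting
 * load wins, ties go to the card with fewer total requests so far.
 */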
609 static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
610 struct zcrypt_card *pref_zc,
611 unsigned int weight,
612 unsigned int pref_weight)
613 {
614 if (!pref_zc)
615 return true;
616 weight += atomic_read(&zc->load);
617 pref_weight += atomic_read(&pref_zc->load);
618 if (weight == pref_weight)
619 return atomic64_read(&zc->card->total_request_count) <
620 atomic64_read(&pref_zc->card->total_request_count);
621 return weight < pref_weight;
622 }
623
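/* Same comparison as zcrypt_card_compare(), but on queue level. */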
624 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
625 struct zcrypt_queue *pref_zq,
626 unsigned int weight,
627 unsigned int pref_weight)
628 {
629 if (!pref_zq)
630 return true;
631 weight += atomic_read(&zq->load);
632 pref_weight += atomic_read(&pref_zq->load);
633 if (weight == pref_weight)
634 return zq->queue->total_request_count <
635 pref_zq->queue->total_request_count;
636 return weight < pref_weight;
637 }
638
639 /*
640 * zcrypt ioctls.
641 */
642 static long zcrypt_rsa_modexpo(struct ap_perms *perms,
643 struct zcrypt_track *tr,
644 struct ica_rsa_modexpo *mex)
645 {
646 struct zcrypt_card *zc, *pref_zc;
647 struct zcrypt_queue *zq, *pref_zq;
648 struct ap_message ap_msg;
649 unsigned int wgt = 0, pref_wgt = 0;
650 unsigned int func_code = 0;
651 int cpen, qpen, qid = 0, rc;
652 struct module *mod;
653
654 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
655
656 rc = ap_init_apmsg(&ap_msg, 0);
657 if (rc)
658 goto out;
659
660 if (mex->outputdatalength < mex->inputdatalength) {
661 rc = -EINVAL;
662 goto out;
663 }
664
665 /*
666 * As long as outputdatalength is big enough, we can set the
667 * outputdatalength equal to the inputdatalength, since that is the
668 * number of bytes we will copy in any case
669 */
670 mex->outputdatalength = mex->inputdatalength;
671
672 rc = get_rsa_modex_fc(mex, &func_code);
673 if (rc)
674 goto out;
675
676 pref_zc = NULL;
677 pref_zq = NULL;
678 spin_lock(&zcrypt_list_lock);
679 for_each_zcrypt_card(zc) {
680 /* Check for usable accelerator or CCA card */
681 if (!zc->online || !zc->card->config || zc->card->chkstop ||
682 !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
683 continue;
684 /* Check for size limits */
685 if (zc->min_mod_size > mex->inputdatalength ||
686 zc->max_mod_size < mex->inputdatalength)
687 continue;
688 /* check if device node has admission for this card */
689 if (!zcrypt_check_card(perms, zc->card->id))
690 continue;
691 /* get weight index of the card device */
692 wgt = zc->speed_rating[func_code];
693 /* penalty if this msg was previously sent via this card */
694 cpen = (tr && tr->again_counter && tr->last_qid &&
695 AP_QID_CARD(tr->last_qid) == zc->card->id) ?
696 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
697 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
698 continue;
699 for_each_zcrypt_queue(zq, zc) {
700 /* check if device is usable and eligible */
701 if (!zq->online || !zq->ops->rsa_modexpo ||
702 !ap_queue_usable(zq->queue))
703 continue;
704 /* check if device node has admission for this queue */
705 if (!zcrypt_check_queue(perms,
706 AP_QID_QUEUE(zq->queue->qid)))
707 continue;
708 /* penalty if the msg was previously sent at this qid */
709 qpen = (tr && tr->again_counter && tr->last_qid &&
710 tr->last_qid == zq->queue->qid) ?
711 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
712 if (!zcrypt_queue_compare(zq, pref_zq,
713 wgt + cpen + qpen, pref_wgt))
714 continue;
715 pref_zc = zc;
716 pref_zq = zq;
717 pref_wgt = wgt + cpen + qpen;
718 }
719 }
720 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
721 spin_unlock(&zcrypt_list_lock);
722
723 if (!pref_zq) {
724 pr_debug("no matching queue found => ENODEV\n");
725 rc = -ENODEV;
726 goto out;
727 }
728
729 qid = pref_zq->queue->qid;
730 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
731
732 spin_lock(&zcrypt_list_lock);
733 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
734 spin_unlock(&zcrypt_list_lock);
735
736 out:
737 ap_release_apmsg(&ap_msg);
738 if (tr) {
739 tr->last_rc = rc;
740 tr->last_qid = qid;
741 }
742 trace_s390_zcrypt_rep(mex, func_code, rc,
743 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
744 return rc;
745 }
746
747 static long zcrypt_rsa_crt(struct ap_perms *perms,
748 struct zcrypt_track *tr,
749 struct ica_rsa_modexpo_crt *crt)
750 {
751 struct zcrypt_card *zc, *pref_zc;
752 struct zcrypt_queue *zq, *pref_zq;
753 struct ap_message ap_msg;
754 unsigned int wgt = 0, pref_wgt = 0;
755 unsigned int func_code = 0;
756 int cpen, qpen, qid = 0, rc;
757 struct module *mod;
758
759 trace_s390_zcrypt_req(crt, TP_ICARSACRT);
760
761 rc = ap_init_apmsg(&ap_msg, 0);
762 if (rc)
763 goto out;
764
765 if (crt->outputdatalength < crt->inputdatalength) {
766 rc = -EINVAL;
767 goto out;
768 }
769
770 /*
771 * As long as outputdatalength is big enough, we can set the
772 * outputdatalength equal to the inputdatalength, since that is the
773 * number of bytes we will copy in any case
774 */
775 crt->outputdatalength = crt->inputdatalength;
776
777 rc = get_rsa_crt_fc(crt, &func_code);
778 if (rc)
779 goto out;
780
781 pref_zc = NULL;
782 pref_zq = NULL;
783 spin_lock(&zcrypt_list_lock);
784 for_each_zcrypt_card(zc) {
785 /* Check for usable accelerator or CCA card */
786 if (!zc->online || !zc->card->config || zc->card->chkstop ||
787 !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
788 continue;
789 /* Check for size limits */
790 if (zc->min_mod_size > crt->inputdatalength ||
791 zc->max_mod_size < crt->inputdatalength)
792 continue;
793 /* check if device node has admission for this card */
794 if (!zcrypt_check_card(perms, zc->card->id))
795 continue;
796 /* get weight index of the card device */
797 wgt = zc->speed_rating[func_code];
798 /* penalty if this msg was previously sent via this card */
799 cpen = (tr && tr->again_counter && tr->last_qid &&
800 AP_QID_CARD(tr->last_qid) == zc->card->id) ?
801 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
802 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
803 continue;
804 for_each_zcrypt_queue(zq, zc) {
805 /* check if device is usable and eligible */
806 if (!zq->online || !zq->ops->rsa_modexpo_crt ||
807 !ap_queue_usable(zq->queue))
808 continue;
809 /* check if device node has admission for this queue */
810 if (!zcrypt_check_queue(perms,
811 AP_QID_QUEUE(zq->queue->qid)))
812 continue;
813 /* penalty if the msg was previously sent at this qid */
814 qpen = (tr && tr->again_counter && tr->last_qid &&
815 tr->last_qid == zq->queue->qid) ?
816 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
817 if (!zcrypt_queue_compare(zq, pref_zq,
818 wgt + cpen + qpen, pref_wgt))
819 continue;
820 pref_zc = zc;
821 pref_zq = zq;
822 pref_wgt = wgt + cpen + qpen;
823 }
824 }
825 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
826 spin_unlock(&zcrypt_list_lock);
827
828 if (!pref_zq) {
829 pr_debug("no matching queue found => ENODEV\n");
830 rc = -ENODEV;
831 goto out;
832 }
833
834 qid = pref_zq->queue->qid;
835 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
836
837 spin_lock(&zcrypt_list_lock);
838 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
839 spin_unlock(&zcrypt_list_lock);
840
841 out:
842 ap_release_apmsg(&ap_msg);
843 if (tr) {
844 tr->last_rc = rc;
845 tr->last_qid = qid;
846 }
847 trace_s390_zcrypt_rep(crt, func_code, rc,
848 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
849 return rc;
850 }
851
852 static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms,
853 struct zcrypt_track *tr,
854 struct ica_xcRB *xcrb)
855 {
856 bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
857 struct zcrypt_card *zc, *pref_zc;
858 struct zcrypt_queue *zq, *pref_zq;
859 struct ap_message ap_msg;
860 unsigned int wgt = 0, pref_wgt = 0;
861 unsigned int func_code = 0;
862 unsigned short *domain, tdom;
863 int cpen, qpen, qid = 0, rc;
864 struct module *mod;
865
866 trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);
867
868 xcrb->status = 0;
869
870 rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
871 AP_MSG_FLAG_MEMPOOL : 0);
872 if (rc)
873 goto out;
874
875 rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
876 if (rc)
877 goto out;
878 print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
879 ap_msg.msg, ap_msg.len, false);
880
881 tdom = *domain;
882 if (perms != &ap_perms && tdom < AP_DOMAINS) {
883 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
884 if (!test_bit_inv(tdom, perms->adm)) {
885 rc = -ENODEV;
886 goto out;
887 }
888 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
889 rc = -EOPNOTSUPP;
890 goto out;
891 }
892 }
893 /*
894 * If a valid target domain is set and this domain is NOT a usage
895 * domain but a control only domain, autoselect target domain.
896 */
897 if (tdom < AP_DOMAINS &&
898 !ap_test_config_usage_domain(tdom) &&
899 ap_test_config_ctrl_domain(tdom))
900 tdom = AUTOSEL_DOM;
901
902 pref_zc = NULL;
903 pref_zq = NULL;
904 spin_lock(&zcrypt_list_lock);
905 for_each_zcrypt_card(zc) {
906 /* Check for usable CCA card */
907 if (!zc->online || !zc->card->config || zc->card->chkstop ||
908 !zc->card->hwinfo.cca)
909 continue;
910 /* Check for user selected CCA card */
911 if (xcrb->user_defined != AUTOSELECT &&
912 xcrb->user_defined != zc->card->id)
913 continue;
914 /* check if request size exceeds card max msg size */
915 if (ap_msg.len > zc->card->maxmsgsize)
916 continue;
917 /* check if device node has admission for this card */
918 if (!zcrypt_check_card(perms, zc->card->id))
919 continue;
920 /* get weight index of the card device */
921 wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
922 /* penalty if this msg was previously sent via this card */
923 cpen = (tr && tr->again_counter && tr->last_qid &&
924 AP_QID_CARD(tr->last_qid) == zc->card->id) ?
925 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
926 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
927 continue;
928 for_each_zcrypt_queue(zq, zc) {
929 /* check for device usable and eligible */
930 if (!zq->online || !zq->ops->send_cprb ||
931 !ap_queue_usable(zq->queue) ||
932 (tdom != AUTOSEL_DOM &&
933 tdom != AP_QID_QUEUE(zq->queue->qid)))
934 continue;
935 /* check if device node has admission for this queue */
936 if (!zcrypt_check_queue(perms,
937 AP_QID_QUEUE(zq->queue->qid)))
938 continue;
939 /* penalty if the msg was previously sent at this qid */
940 qpen = (tr && tr->again_counter && tr->last_qid &&
941 tr->last_qid == zq->queue->qid) ?
942 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
943 if (!zcrypt_queue_compare(zq, pref_zq,
944 wgt + cpen + qpen, pref_wgt))
945 continue;
946 pref_zc = zc;
947 pref_zq = zq;
948 pref_wgt = wgt + cpen + qpen;
949 }
950 }
951 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
952 spin_unlock(&zcrypt_list_lock);
953
954 if (!pref_zq) {
955 pr_debug("no match for address %02x.%04x => ENODEV\n",
956 xcrb->user_defined, *domain);
957 rc = -ENODEV;
958 goto out;
959 }
960
961 /* in case of auto select, provide the correct domain */
962 qid = pref_zq->queue->qid;
963 if (*domain == AUTOSEL_DOM)
964 *domain = AP_QID_QUEUE(qid);
965
966 rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
967 if (!rc) {
968 print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
969 ap_msg.msg, ap_msg.len, false);
970 }
971
972 spin_lock(&zcrypt_list_lock);
973 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
974 spin_unlock(&zcrypt_list_lock);
975
976 out:
977 ap_release_apmsg(&ap_msg);
978 if (tr) {
979 tr->last_rc = rc;
980 tr->last_qid = qid;
981 }
982 trace_s390_zcrypt_rep(xcrb, func_code, rc,
983 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
984 return rc;
985 }
986
987 long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags)
988 {
989 struct zcrypt_track tr;
990 int rc;
991
992 memset(&tr, 0, sizeof(tr));
993
994 do {
995 rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
996 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
997
998 /* on ENODEV failure: retry once again after a requested rescan */
999 if (rc == -ENODEV && zcrypt_process_rescan())
1000 do {
1001 rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
1002 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1003 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1004 rc = -EIO;
1005 if (rc)
1006 pr_debug("rc=%d\n", rc);
1007
1008 return rc;
1009 }
1010 EXPORT_SYMBOL(zcrypt_send_cprb);
1011
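/*
 * Helpers to check whether an AP card id or queue qid matches one of
 * the EP11 target list entries; AUTOSEL_AP and AUTOSEL_DOM act as
 * wildcards.
 */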
1012 static bool is_desired_ep11_card(unsigned int dev_id,
1013 unsigned short target_num,
1014 struct ep11_target_dev *targets)
1015 {
1016 while (target_num-- > 0) {
1017 if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
1018 return true;
1019 targets++;
1020 }
1021 return false;
1022 }
1023
1024 static bool is_desired_ep11_queue(unsigned int dev_qid,
1025 unsigned short target_num,
1026 struct ep11_target_dev *targets)
1027 {
1028 int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
1029
1030 while (target_num-- > 0) {
1031 if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
1032 (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
1033 return true;
1034 targets++;
1035 }
1036 return false;
1037 }
1038
1039 static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms,
1040 struct zcrypt_track *tr,
1041 struct ep11_urb *xcrb)
1042 {
1043 bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
1044 struct zcrypt_card *zc, *pref_zc;
1045 struct zcrypt_queue *zq, *pref_zq;
1046 struct ep11_target_dev *targets = NULL;
1047 unsigned short target_num;
1048 unsigned int wgt = 0, pref_wgt = 0;
1049 unsigned int func_code = 0, domain;
1050 struct ap_message ap_msg;
1051 int cpen, qpen, qid = 0, rc;
1052 struct module *mod;
1053
1054 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
1055
1056 rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
1057 AP_MSG_FLAG_MEMPOOL : 0);
1058 if (rc)
1059 goto out;
1060
1061 target_num = (unsigned short)xcrb->targets_num;
1062
1063 /* empty list indicates autoselect (all available targets) */
1064 rc = -ENOMEM;
1065 if (target_num != 0) {
1066 if (userspace) {
1067 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
1068 if (!targets)
1069 goto out;
1070 if (copy_from_user(targets, xcrb->targets,
1071 target_num * sizeof(*targets))) {
1072 rc = -EFAULT;
1073 goto out;
1074 }
1075 } else {
1076 targets = (struct ep11_target_dev __force __kernel *)xcrb->targets;
1077 }
1078 }
1079
1080 rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
1081 if (rc)
1082 goto out;
1083 print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
1084 ap_msg.msg, ap_msg.len, false);
1085
1086 if (perms != &ap_perms && domain < AUTOSEL_DOM) {
1087 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
1088 if (!test_bit_inv(domain, perms->adm)) {
1089 rc = -ENODEV;
1090 goto out;
1091 }
1092 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
1093 rc = -EOPNOTSUPP;
1094 goto out;
1095 }
1096 }
1097
1098 pref_zc = NULL;
1099 pref_zq = NULL;
1100 spin_lock(&zcrypt_list_lock);
1101 for_each_zcrypt_card(zc) {
1102 /* Check for usable EP11 card */
1103 if (!zc->online || !zc->card->config || zc->card->chkstop ||
1104 !zc->card->hwinfo.ep11)
1105 continue;
1106 /* Check for user selected EP11 card */
1107 if (targets &&
1108 !is_desired_ep11_card(zc->card->id, target_num, targets))
1109 continue;
1110 /* check if request size exceeds card max msg size */
1111 if (ap_msg.len > zc->card->maxmsgsize)
1112 continue;
1113 /* check if device node has admission for this card */
1114 if (!zcrypt_check_card(perms, zc->card->id))
1115 continue;
1116 /* get weight index of the card device */
1117 wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
1118 /* penalty if this msg was previously sent via this card */
1119 cpen = (tr && tr->again_counter && tr->last_qid &&
1120 AP_QID_CARD(tr->last_qid) == zc->card->id) ?
1121 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
1122 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
1123 continue;
1124 for_each_zcrypt_queue(zq, zc) {
1125 /* check if device is usable and eligible */
1126 if (!zq->online || !zq->ops->send_ep11_cprb ||
1127 !ap_queue_usable(zq->queue) ||
1128 (targets &&
1129 !is_desired_ep11_queue(zq->queue->qid,
1130 target_num, targets)))
1131 continue;
1132 /* check if device node has admission for this queue */
1133 if (!zcrypt_check_queue(perms,
1134 AP_QID_QUEUE(zq->queue->qid)))
1135 continue;
1136 /* penalty if the msg was previously sent at this qid */
1137 qpen = (tr && tr->again_counter && tr->last_qid &&
1138 tr->last_qid == zq->queue->qid) ?
1139 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
1140 if (!zcrypt_queue_compare(zq, pref_zq,
1141 wgt + cpen + qpen, pref_wgt))
1142 continue;
1143 pref_zc = zc;
1144 pref_zq = zq;
1145 pref_wgt = wgt + cpen + qpen;
1146 }
1147 }
1148 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1149 spin_unlock(&zcrypt_list_lock);
1150
1151 if (!pref_zq) {
1152 if (targets && target_num == 1) {
1153 pr_debug("no match for address %02x.%04x => ENODEV\n",
1154 (int)targets->ap_id, (int)targets->dom_id);
1155 } else if (targets) {
1156 pr_debug("no match for %d target addrs => ENODEV\n",
1157 (int)target_num);
1158 } else {
1159 pr_debug("no match for address ff.ffff => ENODEV\n");
1160 }
1161 rc = -ENODEV;
1162 goto out;
1163 }
1164
1165 qid = pref_zq->queue->qid;
1166 rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
1167 if (!rc) {
1168 print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
1169 ap_msg.msg, ap_msg.len, false);
1170 }
1171
1172 spin_lock(&zcrypt_list_lock);
1173 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1174 spin_unlock(&zcrypt_list_lock);
1175
1176 out:
1177 if (userspace)
1178 kfree(targets);
1179 ap_release_apmsg(&ap_msg);
1180 if (tr) {
1181 tr->last_rc = rc;
1182 tr->last_qid = qid;
1183 }
1184 trace_s390_zcrypt_rep(xcrb, func_code, rc,
1185 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1186 return rc;
1187 }
1188
1189 long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags)
1190 {
1191 struct zcrypt_track tr;
1192 int rc;
1193
1194 memset(&tr, 0, sizeof(tr));
1195
1196 do {
1197 rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
1198 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1199
1200 /* on ENODEV failure: retry once again after a requested rescan */
1201 if (rc == -ENODEV && zcrypt_process_rescan())
1202 do {
1203 rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
1204 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1205 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1206 rc = -EIO;
1207 if (rc)
1208 pr_debug("rc=%d\n", rc);
1209
1210 return rc;
1211 }
1212 EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
1213
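/*
 * Pick a usable CCA card/queue and let its rng op fill the given
 * buffer with random bytes fetched from the adapter.
 */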
1214 static long zcrypt_rng(char *buffer)
1215 {
1216 struct zcrypt_card *zc, *pref_zc;
1217 struct zcrypt_queue *zq, *pref_zq;
1218 unsigned int wgt = 0, pref_wgt = 0;
1219 unsigned int func_code = 0;
1220 struct ap_message ap_msg;
1221 unsigned int domain;
1222 int qid = 0, rc = -ENODEV;
1223 struct module *mod;
1224
1225 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
1226
1227 rc = ap_init_apmsg(&ap_msg, 0);
1228 if (rc)
1229 goto out;
1230 rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
1231 if (rc)
1232 goto out;
1233
1234 pref_zc = NULL;
1235 pref_zq = NULL;
1236 spin_lock(&zcrypt_list_lock);
1237 for_each_zcrypt_card(zc) {
1238 /* Check for usable CCA card */
1239 if (!zc->online || !zc->card->config || zc->card->chkstop ||
1240 !zc->card->hwinfo.cca)
1241 continue;
1242 /* get weight index of the card device */
1243 wgt = zc->speed_rating[func_code];
1244 if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
1245 continue;
1246 for_each_zcrypt_queue(zq, zc) {
1247 /* check if device is usable and eligible */
1248 if (!zq->online || !zq->ops->rng ||
1249 !ap_queue_usable(zq->queue))
1250 continue;
1251 if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
1252 continue;
1253 pref_zc = zc;
1254 pref_zq = zq;
1255 pref_wgt = wgt;
1256 }
1257 }
1258 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1259 spin_unlock(&zcrypt_list_lock);
1260
1261 if (!pref_zq) {
1262 pr_debug("no matching queue found => ENODEV\n");
1263 rc = -ENODEV;
1264 goto out;
1265 }
1266
1267 qid = pref_zq->queue->qid;
1268 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
1269
1270 spin_lock(&zcrypt_list_lock);
1271 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1272 spin_unlock(&zcrypt_list_lock);
1273
1274 out:
1275 ap_release_apmsg(&ap_msg);
1276 trace_s390_zcrypt_rep(buffer, func_code, rc,
1277 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1278 return rc;
1279 }
1280
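/*
 * Fill the devstatus array (MAX_ZDEV_ENTRIES entries) with one entry
 * per known card/queue pair, indexed by card * AP_DOMAINS + queue.
 */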
1281 static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
1282 {
1283 struct zcrypt_card *zc;
1284 struct zcrypt_queue *zq;
1285 struct zcrypt_device_status *stat;
1286 int card, queue;
1287
1288 memset(devstatus, 0, MAX_ZDEV_ENTRIES
1289 * sizeof(struct zcrypt_device_status));
1290
1291 spin_lock(&zcrypt_list_lock);
1292 for_each_zcrypt_card(zc) {
1293 for_each_zcrypt_queue(zq, zc) {
1294 card = AP_QID_CARD(zq->queue->qid);
1295 if (card >= MAX_ZDEV_CARDIDS)
1296 continue;
1297 queue = AP_QID_QUEUE(zq->queue->qid);
1298 stat = &devstatus[card * AP_DOMAINS + queue];
1299 stat->hwtype = zc->card->ap_dev.device_type;
1300 stat->functions = zc->card->hwinfo.fac >> 26;
1301 stat->qid = zq->queue->qid;
1302 stat->online = zq->online ? 0x01 : 0x00;
1303 }
1304 }
1305 spin_unlock(&zcrypt_list_lock);
1306 }
1307
1308 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
1309 int maxcard, int maxqueue)
1310 {
1311 struct zcrypt_card *zc;
1312 struct zcrypt_queue *zq;
1313 struct zcrypt_device_status_ext *stat;
1314 int card, queue;
1315
1316 maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT);
1317 maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT);
1318
1319 spin_lock(&zcrypt_list_lock);
1320 for_each_zcrypt_card(zc) {
1321 for_each_zcrypt_queue(zq, zc) {
1322 card = AP_QID_CARD(zq->queue->qid);
1323 queue = AP_QID_QUEUE(zq->queue->qid);
1324 if (card >= maxcard || queue >= maxqueue)
1325 continue;
1326 stat = &devstatus[card * maxqueue + queue];
1327 stat->hwtype = zc->card->ap_dev.device_type;
1328 stat->functions = zc->card->hwinfo.fac >> 26;
1329 stat->qid = zq->queue->qid;
1330 stat->online = zq->online ? 0x01 : 0x00;
1331 }
1332 }
1333 spin_unlock(&zcrypt_list_lock);
1334 }
1335 EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
1336
1337 int zcrypt_device_status_ext(int card, int queue,
1338 struct zcrypt_device_status_ext *devstat)
1339 {
1340 struct zcrypt_card *zc;
1341 struct zcrypt_queue *zq;
1342
1343 memset(devstat, 0, sizeof(*devstat));
1344
1345 spin_lock(&zcrypt_list_lock);
1346 for_each_zcrypt_card(zc) {
1347 for_each_zcrypt_queue(zq, zc) {
1348 if (card == AP_QID_CARD(zq->queue->qid) &&
1349 queue == AP_QID_QUEUE(zq->queue->qid)) {
1350 devstat->hwtype = zc->card->ap_dev.device_type;
1351 devstat->functions = zc->card->hwinfo.fac >> 26;
1352 devstat->qid = zq->queue->qid;
1353 devstat->online = zq->online ? 0x01 : 0x00;
1354 spin_unlock(&zcrypt_list_lock);
1355 return 0;
1356 }
1357 }
1358 }
1359 spin_unlock(&zcrypt_list_lock);
1360
1361 return -ENODEV;
1362 }
1363 EXPORT_SYMBOL(zcrypt_device_status_ext);
1364
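/*
 * Legacy per-adapter status: for each card within the default domain
 * report the user space type if online, 0x0d otherwise.
 */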
1365 static void zcrypt_status_mask(char status[], size_t max_adapters)
1366 {
1367 struct zcrypt_card *zc;
1368 struct zcrypt_queue *zq;
1369 int card;
1370
1371 memset(status, 0, max_adapters);
1372 spin_lock(&zcrypt_list_lock);
1373 for_each_zcrypt_card(zc) {
1374 for_each_zcrypt_queue(zq, zc) {
1375 card = AP_QID_CARD(zq->queue->qid);
1376 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1377 card >= max_adapters)
1378 continue;
1379 status[card] = zc->online ? zc->user_space_type : 0x0d;
1380 }
1381 }
1382 spin_unlock(&zcrypt_list_lock);
1383 }
1384
1385 static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
1386 {
1387 struct zcrypt_card *zc;
1388 struct zcrypt_queue *zq;
1389 int card;
1390
1391 memset(qdepth, 0, max_adapters);
1392 spin_lock(&zcrypt_list_lock);
1393 local_bh_disable();
1394 for_each_zcrypt_card(zc) {
1395 for_each_zcrypt_queue(zq, zc) {
1396 card = AP_QID_CARD(zq->queue->qid);
1397 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1398 card >= max_adapters)
1399 continue;
1400 spin_lock(&zq->queue->lock);
1401 qdepth[card] =
1402 zq->queue->pendingq_count +
1403 zq->queue->requestq_count;
1404 spin_unlock(&zq->queue->lock);
1405 }
1406 }
1407 local_bh_enable();
1408 spin_unlock(&zcrypt_list_lock);
1409 }
1410
1411 static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
1412 {
1413 struct zcrypt_card *zc;
1414 struct zcrypt_queue *zq;
1415 int card;
1416 u64 cnt;
1417
1418 memset(reqcnt, 0, sizeof(int) * max_adapters);
1419 spin_lock(&zcrypt_list_lock);
1420 local_bh_disable();
1421 for_each_zcrypt_card(zc) {
1422 for_each_zcrypt_queue(zq, zc) {
1423 card = AP_QID_CARD(zq->queue->qid);
1424 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1425 card >= max_adapters)
1426 continue;
1427 spin_lock(&zq->queue->lock);
1428 cnt = zq->queue->total_request_count;
1429 spin_unlock(&zq->queue->lock);
1430 reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
1431 }
1432 }
1433 local_bh_enable();
1434 spin_unlock(&zcrypt_list_lock);
1435 }
1436
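/*
 * Sum of pending requests over all queues within the default domain.
 */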
1437 static int zcrypt_pendingq_count(void)
1438 {
1439 struct zcrypt_card *zc;
1440 struct zcrypt_queue *zq;
1441 int pendingq_count;
1442
1443 pendingq_count = 0;
1444 spin_lock(&zcrypt_list_lock);
1445 local_bh_disable();
1446 for_each_zcrypt_card(zc) {
1447 for_each_zcrypt_queue(zq, zc) {
1448 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1449 continue;
1450 spin_lock(&zq->queue->lock);
1451 pendingq_count += zq->queue->pendingq_count;
1452 spin_unlock(&zq->queue->lock);
1453 }
1454 }
1455 local_bh_enable();
1456 spin_unlock(&zcrypt_list_lock);
1457 return pendingq_count;
1458 }
1459
1460 static int zcrypt_requestq_count(void)
1461 {
1462 struct zcrypt_card *zc;
1463 struct zcrypt_queue *zq;
1464 int requestq_count;
1465
1466 requestq_count = 0;
1467 spin_lock(&zcrypt_list_lock);
1468 local_bh_disable();
1469 for_each_zcrypt_card(zc) {
1470 for_each_zcrypt_queue(zq, zc) {
1471 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1472 continue;
1473 spin_lock(&zq->queue->lock);
1474 requestq_count += zq->queue->requestq_count;
1475 spin_unlock(&zq->queue->lock);
1476 }
1477 }
1478 local_bh_enable();
1479 spin_unlock(&zcrypt_list_lock);
1480 return requestq_count;
1481 }
1482
1483 static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
1484 {
1485 int rc;
1486 struct zcrypt_track tr;
1487 struct ica_rsa_modexpo mex;
1488 struct ica_rsa_modexpo __user *umex = (void __user *)arg;
1489
1490 memset(&tr, 0, sizeof(tr));
1491 if (copy_from_user(&mex, umex, sizeof(mex)))
1492 return -EFAULT;
1493
1494 do {
1495 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1496 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1497
1498 /* on ENODEV failure: retry once again after a requested rescan */
1499 if (rc == -ENODEV && zcrypt_process_rescan())
1500 do {
1501 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1502 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1503 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1504 rc = -EIO;
1505 if (rc) {
1506 pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
1507 return rc;
1508 }
1509 return put_user(mex.outputdatalength, &umex->outputdatalength);
1510 }
1511
1512 static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
1513 {
1514 int rc;
1515 struct zcrypt_track tr;
1516 struct ica_rsa_modexpo_crt crt;
1517 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;
1518
1519 memset(&tr, 0, sizeof(tr));
1520 if (copy_from_user(&crt, ucrt, sizeof(crt)))
1521 return -EFAULT;
1522
1523 do {
1524 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1525 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1526
1527 /* on ENODEV failure: retry once again after a requested rescan */
1528 if (rc == -ENODEV && zcrypt_process_rescan())
1529 do {
1530 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1531 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1532 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1533 rc = -EIO;
1534 if (rc) {
1535 pr_debug("ioctl ICARSACRT rc=%d\n", rc);
1536 return rc;
1537 }
1538 return put_user(crt.outputdatalength, &ucrt->outputdatalength);
1539 }
1540
1541 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
1542 {
1543 int rc;
1544 struct ica_xcRB xcrb;
1545 struct zcrypt_track tr;
1546 u32 xflags = ZCRYPT_XFLAG_USERSPACE;
1547 struct ica_xcRB __user *uxcrb = (void __user *)arg;
1548
1549 memset(&tr, 0, sizeof(tr));
1550 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1551 return -EFAULT;
1552
1553 do {
1554 rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
1555 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1556
1557 /* on ENODEV failure: retry once again after a requested rescan */
1558 if (rc == -ENODEV && zcrypt_process_rescan())
1559 do {
1560 rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
1561 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1562 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1563 rc = -EIO;
1564 if (rc)
1565 pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
1566 rc, xcrb.status);
1567 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1568 return -EFAULT;
1569 return rc;
1570 }
1571
1572 static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
1573 {
1574 int rc;
1575 struct ep11_urb xcrb;
1576 struct zcrypt_track tr;
1577 u32 xflags = ZCRYPT_XFLAG_USERSPACE;
1578 struct ep11_urb __user *uxcrb = (void __user *)arg;
1579
1580 memset(&tr, 0, sizeof(tr));
1581 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1582 return -EFAULT;
1583
1584 do {
1585 rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
1586 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1587
1588 /* on ENODEV failure: retry once again after a requested rescan */
1589 if (rc == -ENODEV && zcrypt_process_rescan())
1590 do {
1591 rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
1592 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1593 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1594 rc = -EIO;
1595 if (rc)
1596 pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
1597 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1598 return -EFAULT;
1599 return rc;
1600 }
1601
1602 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
1603 unsigned long arg)
1604 {
1605 int rc;
1606 struct ap_perms *perms =
1607 (struct ap_perms *)filp->private_data;
1608
1609 rc = zcrypt_check_ioctl(perms, cmd);
1610 if (rc)
1611 return rc;
1612
1613 switch (cmd) {
1614 case ICARSAMODEXPO:
1615 return icarsamodexpo_ioctl(perms, arg);
1616 case ICARSACRT:
1617 return icarsacrt_ioctl(perms, arg);
1618 case ZSECSENDCPRB:
1619 return zsecsendcprb_ioctl(perms, arg);
1620 case ZSENDEP11CPRB:
1621 return zsendep11cprb_ioctl(perms, arg);
1622 case ZCRYPT_DEVICE_STATUS: {
1623 struct zcrypt_device_status_ext *device_status;
1624 size_t total_size = MAX_ZDEV_ENTRIES_EXT
1625 * sizeof(struct zcrypt_device_status_ext);
1626
1627 device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT,
1628 sizeof(struct zcrypt_device_status_ext),
1629 GFP_KERNEL);
1630 if (!device_status)
1631 return -ENOMEM;
1632 zcrypt_device_status_mask_ext(device_status,
1633 MAX_ZDEV_CARDIDS_EXT,
1634 MAX_ZDEV_DOMAINS_EXT);
1635 if (copy_to_user((char __user *)arg, device_status,
1636 total_size))
1637 rc = -EFAULT;
1638 kvfree(device_status);
1639 return rc;
1640 }
1641 case ZCRYPT_STATUS_MASK: {
1642 char status[AP_DEVICES];
1643
1644 zcrypt_status_mask(status, AP_DEVICES);
1645 if (copy_to_user((char __user *)arg, status, sizeof(status)))
1646 return -EFAULT;
1647 return 0;
1648 }
1649 case ZCRYPT_QDEPTH_MASK: {
1650 char qdepth[AP_DEVICES];
1651
1652 zcrypt_qdepth_mask(qdepth, AP_DEVICES);
1653 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
1654 return -EFAULT;
1655 return 0;
1656 }
1657 case ZCRYPT_PERDEV_REQCNT: {
1658 u32 *reqcnt;
1659
1660 reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
1661 if (!reqcnt)
1662 return -ENOMEM;
1663 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
1664 if (copy_to_user((int __user *)arg, reqcnt,
1665 sizeof(u32) * AP_DEVICES))
1666 rc = -EFAULT;
1667 kfree(reqcnt);
1668 return rc;
1669 }
1670 case Z90STAT_REQUESTQ_COUNT:
1671 return put_user(zcrypt_requestq_count(), (int __user *)arg);
1672 case Z90STAT_PENDINGQ_COUNT:
1673 return put_user(zcrypt_pendingq_count(), (int __user *)arg);
1674 case Z90STAT_TOTALOPEN_COUNT:
1675 return put_user(atomic_read(&zcrypt_open_count),
1676 (int __user *)arg);
1677 case Z90STAT_DOMAIN_INDEX:
1678 return put_user(ap_domain_index, (int __user *)arg);
1679 /*
1680 * Deprecated ioctls
1681 */
1682 case ZDEVICESTATUS: {
1683 /* the old ioctl supports only 64 adapters */
1684 struct zcrypt_device_status *device_status;
1685 size_t total_size = MAX_ZDEV_ENTRIES
1686 * sizeof(struct zcrypt_device_status);
1687
1688 device_status = kzalloc(total_size, GFP_KERNEL);
1689 if (!device_status)
1690 return -ENOMEM;
1691 zcrypt_device_status_mask(device_status);
1692 if (copy_to_user((char __user *)arg, device_status,
1693 total_size))
1694 rc = -EFAULT;
1695 kfree(device_status);
1696 return rc;
1697 }
1698 case Z90STAT_STATUS_MASK: {
1699 /* the old ioctl supports only 64 adapters */
1700 char status[MAX_ZDEV_CARDIDS];
1701
1702 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
1703 if (copy_to_user((char __user *)arg, status, sizeof(status)))
1704 return -EFAULT;
1705 return 0;
1706 }
1707 case Z90STAT_QDEPTH_MASK: {
1708 /* the old ioctl supports only 64 adapters */
1709 char qdepth[MAX_ZDEV_CARDIDS];
1710
1711 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
1712 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
1713 return -EFAULT;
1714 return 0;
1715 }
1716 case Z90STAT_PERDEV_REQCNT: {
1717 /* the old ioctl supports only 64 adapters */
1718 u32 reqcnt[MAX_ZDEV_CARDIDS];
1719
1720 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
1721 if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
1722 return -EFAULT;
1723 return 0;
1724 }
1725 /* unknown ioctl number */
1726 default:
1727 pr_debug("unknown ioctl 0x%08x\n", cmd);
1728 return -ENOIOCTLCMD;
1729 }
1730 }
1731
1732 #ifdef CONFIG_COMPAT
1733 /*
1734 * ioctl32 conversion routines
1735 */
1736 struct compat_ica_rsa_modexpo {
1737 compat_uptr_t inputdata;
1738 unsigned int inputdatalength;
1739 compat_uptr_t outputdata;
1740 unsigned int outputdatalength;
1741 compat_uptr_t b_key;
1742 compat_uptr_t n_modulus;
1743 };
1744
1745 static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
1746 unsigned int cmd, unsigned long arg)
1747 {
1748 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
1749 struct compat_ica_rsa_modexpo mex32;
1750 struct ica_rsa_modexpo mex64;
1751 struct zcrypt_track tr;
1752 long rc;
1753
1754 memset(&tr, 0, sizeof(tr));
1755 if (copy_from_user(&mex32, umex32, sizeof(mex32)))
1756 return -EFAULT;
1757 mex64.inputdata = compat_ptr(mex32.inputdata);
1758 mex64.inputdatalength = mex32.inputdatalength;
1759 mex64.outputdata = compat_ptr(mex32.outputdata);
1760 mex64.outputdatalength = mex32.outputdatalength;
1761 mex64.b_key = compat_ptr(mex32.b_key);
1762 mex64.n_modulus = compat_ptr(mex32.n_modulus);
1763 do {
1764 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1765 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1766
1767 /* on ENODEV failure: retry once again after a requested rescan */
1768 if (rc == -ENODEV && zcrypt_process_rescan())
1769 do {
1770 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1771 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1772 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1773 rc = -EIO;
1774 if (rc)
1775 return rc;
1776 return put_user(mex64.outputdatalength,
1777 &umex32->outputdatalength);
1778 }
1779
1780 struct compat_ica_rsa_modexpo_crt {
1781 compat_uptr_t inputdata;
1782 unsigned int inputdatalength;
1783 compat_uptr_t outputdata;
1784 unsigned int outputdatalength;
1785 compat_uptr_t bp_key;
1786 compat_uptr_t bq_key;
1787 compat_uptr_t np_prime;
1788 compat_uptr_t nq_prime;
1789 compat_uptr_t u_mult_inv;
1790 };
1791
1792 static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
1793 unsigned int cmd, unsigned long arg)
1794 {
1795 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
1796 struct compat_ica_rsa_modexpo_crt crt32;
1797 struct ica_rsa_modexpo_crt crt64;
1798 struct zcrypt_track tr;
1799 long rc;
1800
1801 memset(&tr, 0, sizeof(tr));
1802 if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
1803 return -EFAULT;
1804 crt64.inputdata = compat_ptr(crt32.inputdata);
1805 crt64.inputdatalength = crt32.inputdatalength;
1806 crt64.outputdata = compat_ptr(crt32.outputdata);
1807 crt64.outputdatalength = crt32.outputdatalength;
1808 crt64.bp_key = compat_ptr(crt32.bp_key);
1809 crt64.bq_key = compat_ptr(crt32.bq_key);
1810 crt64.np_prime = compat_ptr(crt32.np_prime);
1811 crt64.nq_prime = compat_ptr(crt32.nq_prime);
1812 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
1813 do {
1814 rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1815 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1816
1817 /* on ENODEV failure: retry once again after a requested rescan */
1818 if (rc == -ENODEV && zcrypt_process_rescan())
1819 do {
1820 rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1821 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
1822 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1823 rc = -EIO;
1824 if (rc)
1825 return rc;
1826 return put_user(crt64.outputdatalength,
1827 &ucrt32->outputdatalength);
1828 }
1829
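/*
 * 32-bit layout of struct ica_xcRB as seen by compat userspace: the
 * buffer addresses are compat_uptr_t and the padding arrays shrink
 * accordingly (16 - sizeof(compat_uptr_t)), so the packed structure
 * matches the byte layout a 32-bit application hands in.
 */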
struct compat_ica_xcrb {
	unsigned short agent_ID;
	unsigned int user_defined;
	unsigned short request_ID;
	unsigned int request_control_blk_length;
	unsigned char padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_control_blk_addr;
	unsigned int request_data_length;
	char padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_data_address;
	unsigned int reply_control_blk_length;
	char padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_control_blk_addr;
	unsigned int reply_data_length;
	char padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_data_addr;
	unsigned short priority_window;
	unsigned int status;
} __packed;

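/*
 * Convert a compat CPRB (XCRB) request into the 64-bit struct ica_xcRB,
 * send it via _zcrypt_send_cprb() with the usual EAGAIN/ENODEV retry
 * handling and copy the updated reply lengths and status back to the
 * 32-bit userspace structure, even if the request itself failed.
 */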
static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct compat_ica_xcrb xcrb32;
	struct zcrypt_track tr;
	struct ica_xcRB xcrb64;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
		return -EFAULT;
	xcrb64.agent_ID = xcrb32.agent_ID;
	xcrb64.user_defined = xcrb32.user_defined;
	xcrb64.request_ID = xcrb32.request_ID;
	xcrb64.request_control_blk_length =
		xcrb32.request_control_blk_length;
	xcrb64.request_control_blk_addr =
		compat_ptr(xcrb32.request_control_blk_addr);
	xcrb64.request_data_length =
		xcrb32.request_data_length;
	xcrb64.request_data_address =
		compat_ptr(xcrb32.request_data_address);
	xcrb64.reply_control_blk_length =
		xcrb32.reply_control_blk_length;
	xcrb64.reply_control_blk_addr =
		compat_ptr(xcrb32.reply_control_blk_addr);
	xcrb64.reply_data_length = xcrb32.reply_data_length;
	xcrb64.reply_data_addr =
		compat_ptr(xcrb32.reply_data_addr);
	xcrb64.priority_window = xcrb32.priority_window;
	xcrb64.status = xcrb32.status;
	do {
		rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
	xcrb32.reply_data_length = xcrb64.reply_data_length;
	xcrb32.status = xcrb64.status;
	if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
		return -EFAULT;
	return rc;
}

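/*
 * Compat ioctl entry point: only the three ioctls that carry userspace
 * pointers (ICARSAMODEXPO, ICARSACRT, ZSECSENDCPRB) need translation;
 * the remaining ioctls use fixed-size fields and are handled by the
 * native zcrypt_unlocked_ioctl().
 */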
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(perms, filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(perms, filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcrb32(perms, filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner = THIS_MODULE,
	.read = zcrypt_read,
	.write = zcrypt_write,
	.unlocked_ioctl = zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = zcrypt_compat_ioctl,
#endif
	.open = zcrypt_open,
	.release = zcrypt_release,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "z90crypt",
	.fops = &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

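/*
 * hwrng callback: random data is fetched from a crypto card via
 * zcrypt_rng() into the page sized buffer and handed out one 32-bit
 * word at a time; a new chunk is requested only when the buffer is
 * exhausted.
 */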
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

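/*
 * The quality value is the estimated entropy of the delivered data in
 * bits per 1024 bits of output, as defined by the hw_random framework.
 */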
static struct hwrng zcrypt_rng_dev = {
	.name = "zcrypt",
	.data_read = zcrypt_rng_data_read,
	.quality = 990,
};

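/*
 * Reference counted registration of the zcrypt hwrng device: the first
 * caller allocates the page sized buffer and registers with the
 * hw_random core, subsequent callers only increment the counter. The
 * counterpart zcrypt_rng_device_remove() unregisters and frees the
 * buffer when the last user is gone.
 */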
int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long)zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

/*
 * Wait until the zcrypt api is operational.
 * The AP bus scan and the binding of ap devices to device drivers are
 * asynchronous jobs. This function waits until these initial jobs are
 * done and thus the zcrypt api should be ready to serve crypto
 * requests - if there are resources available. The function uses an
 * internal timeout of 30s. The very first caller will either wait
 * until the ap bus bindings are complete or until the timeout expires.
 * This state is remembered for further callers, which are only blocked
 * until a decision is made (timeout or bindings complete).
 * On timeout -ETIME is returned, on success the return value is 0.
 */
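/*
 * Illustrative usage only (not taken from a specific in-tree caller):
 * a module that depends on the zcrypt api would typically wait before
 * issuing its first request, e.g.
 *
 *	rc = zcrypt_wait_api_operational();
 *	if (rc)
 *		return rc;
 */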
int zcrypt_wait_api_operational(void)
{
	static DEFINE_MUTEX(zcrypt_wait_api_lock);
	static int zcrypt_wait_api_state;
	int rc;

	rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
	if (rc)
		return rc;

	switch (zcrypt_wait_api_state) {
	case 0:
		/* initial state, wait for the ap bus bindings to complete */
		rc = ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
		switch (rc) {
		case 0:
			/* ap bus bindings are complete */
			zcrypt_wait_api_state = 1;
			break;
		case -EINTR:
			/* interrupted, go back to caller */
			break;
		case -ETIME:
			/* timeout */
			ZCRYPT_DBF_WARN("%s ap_wait_apqn_bindings_complete()=ETIME\n",
					__func__);
			zcrypt_wait_api_state = -ETIME;
			break;
		default:
			/* other failure */
			pr_debug("ap_wait_apqn_bindings_complete()=%d\n", rc);
			break;
		}
		break;
	case 1:
		/* a previous caller already found ap bus bindings complete */
		rc = 0;
		break;
	default:
		/* a previous caller had timeout or other failure */
		rc = zcrypt_wait_api_state;
		break;
	}

	mutex_unlock(&zcrypt_wait_api_lock);

	return rc;
}
EXPORT_SYMBOL(zcrypt_wait_api_operational);

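/*
 * Set up the s390 debug feature for zcrypt: two pages per debug area,
 * one area, entries large enough for ZCRYPT_DBF_MAX_SPRINTF_ARGS
 * sprintf arguments, with the sprintf view registered and the default
 * level set to DBF_ERR.
 */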
int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
					 ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

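/*
 * Set up the zcrypt device node (zcdn) infrastructure: register the
 * 'zcrypt' device class, reserve a char device region of
 * ZCRYPT_MAX_MINOR_NODES minors, add the cdev and create the
 * zcdn_create/zcdn_destroy class attribute files. On failure the
 * already completed steps are undone in reverse order.
 */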
static int __init zcdn_init(void)
{
	int rc;

	/* create a new class 'zcrypt' */
	rc = class_register(&zcrypt_class);
	if (rc)
		goto out_class_register_failed;

	/* alloc device minor range */
	rc = alloc_chrdev_region(&zcrypt_devt,
				 0, ZCRYPT_MAX_MINOR_NODES,
				 ZCRYPT_NAME);
	if (rc)
		goto out_alloc_chrdev_failed;

	cdev_init(&zcrypt_cdev, &zcrypt_fops);
	zcrypt_cdev.owner = THIS_MODULE;
	rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	if (rc)
		goto out_cdev_add_failed;

	/* need some class specific sysfs attributes */
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create);
	if (rc)
		goto out_class_create_file_1_failed;
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy);
	if (rc)
		goto out_class_create_file_2_failed;

	return 0;

out_class_create_file_2_failed:
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
	cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
	class_unregister(&zcrypt_class);
out_class_register_failed:
	return rc;
}

static void zcdn_exit(void)
{
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
	class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy);
	zcdn_destroy_all();
	cdev_del(&zcrypt_cdev);
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	class_unregister(&zcrypt_class);
}

/*
 * zcrypt_api_init(): Module initialization.
 *
 * Checks the mempool_threshold module parameter, sets up the debug
 * feature, the zcdn infrastructure and the CCA and EP11 misc code,
 * registers the z90crypt misc device and finally initializes the
 * msgtype 6 and msgtype 50 handlers.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	/* make sure the mempool threshold is >= 1 */
	if (zcrypt_mempool_threshold < 1) {
		rc = -EINVAL;
		goto out;
	}

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	rc = zcdn_init();
	if (rc)
		goto out_zcdn_init_failed;

	rc = zcrypt_ccamisc_init();
	if (rc)
		goto out_ccamisc_init_failed;

	rc = zcrypt_ep11misc_init();
	if (rc)
		goto out_ep11misc_init_failed;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_misc_register_failed;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();

	return 0;

out_misc_register_failed:
	zcrypt_ep11misc_exit();
out_ep11misc_init_failed:
	zcrypt_ccamisc_exit();
out_ccamisc_init_failed:
	zcdn_exit();
out_zcdn_init_failed:
	zcrypt_debug_exit();
out:
	return rc;
}

/*
 * zcrypt_api_exit(): Module termination.
 *
 * Removes the zcdn device nodes, unregisters the misc device and the
 * msgtype handlers, tears down the CCA and EP11 misc code and finally
 * unregisters the debug feature.
 */
void __exit zcrypt_api_exit(void)
{
	zcdn_exit();
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_ccamisc_exit();
	zcrypt_ep11misc_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);