// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright IBM Corp. 2001, 2018
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
 */

#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

unsigned int zcrypt_mempool_threshold = 5;
module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440);
MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/*
 * Process a rescan of the transport layer.
 * Runs a synchronous AP bus rescan.
 * Returns true if something has changed (for example the bus scan
 * has found and built up new devices) and a retry is worthwhile.
 * Otherwise false is returned, meaning no changes on the AP bus level.
 */
static inline bool zcrypt_process_rescan(void)
{
	return ap_bus_force_rescan();
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if (zops->variant == variant &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);

/*
 * Multi device nodes extension functions.
 */
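
/*
 * Usage sketch (paths assume the class name ZCRYPT_NAME, i.e. "zcrypt";
 * the node name "my_node" is just an example chosen by the admin):
 *   echo my_node >/sys/class/zcrypt/create
 *   echo 0x...   >/sys/class/zcrypt/my_node/apmask
 *   echo my_node >/sys/class/zcrypt/destroy
 * Each zcdn node is a separate char device carrying its own ap_perms
 * (adapter, domain and ioctl masks) evaluated on open() and ioctl().
 */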

struct zcdn_device;

static void zcdn_device_release(struct device *dev);
static const struct class zcrypt_class = {
	.name = ZCRYPT_NAME,
	.dev_release = zcdn_device_release,
};
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

#define ZCDN_MAX_NAME 32

static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);

/*
 * Find zcdn device by name.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
	struct device *dev = class_find_device_by_name(&zcrypt_class, name);

	return dev ? to_zcdn_dev(dev) : NULL;
}

/*
 * Find zcdn device by devt value.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);

	return dev ? to_zcdn_dev(dev) : NULL;
}

static ssize_t ioctlmask_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t ioctlmask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
			       AP_IOCTLS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(ioctlmask);

static ssize_t apmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t apmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
			       AP_DEVICES, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(apmask);

static ssize_t aqmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx",
				   zcdndev->perms.aqm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t aqmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(aqmask);

static ssize_t admask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t admask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(admask);

static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	&dev_attr_admask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};

static ssize_t zcdn_create_store(const struct class *class,
				 const struct class_attribute *attr,
				 const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_create(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_create =
	__ATTR(create, 0600, NULL, zcdn_create_store);

static ssize_t zcdn_destroy_store(const struct class *class,
				  const struct class_attribute *attr,
				  const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_destroy(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_destroy =
	__ATTR(destroy, 0600, NULL, zcdn_destroy_store);

static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
			__func__, MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}

static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = &zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		rc = dev_set_name(&zcdndev->device, "%s", name);
	else
		rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
	if (rc) {
		kfree(zcdndev);
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
			__func__, MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_perms_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_perms_mutex);
}

/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_write(): Not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_open(): Device open function to count the number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	struct ap_perms *perms = &ap_perms;

	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_perms_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev)
			perms = &zcdndev->perms;
	}
	filp->private_data = (void *)perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}

/*
 * zcrypt_release(): Device close function to count the number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_perms_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}

	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline int zcrypt_check_ioctl(struct ap_perms *perms,
				     unsigned int cmd)
{
	int rc = -EPERM;
	int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;

	if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
		if (test_bit_inv(ioctlnr, perms->ioctlm))
			rc = 0;
	}

	if (rc)
		ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
				__func__, ioctlnr, rc);

	return rc;
}

static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
	return test_bit_inv(card, perms->apm) ? true : false;
}

static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
	return test_bit_inv(queue, perms->aqm) ? true : false;
}
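
/*
 * zcrypt_pick_queue()/zcrypt_drop_queue() below bracket each request:
 * pick takes a module, card, queue and ap device reference and adds the
 * request's weight to the card and queue load counters; drop reverses
 * all of that once the request has been processed.
 */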
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
		return NULL;
	zcrypt_card_get(zc);
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	*pmod = zq->queue->ap_dev.device.driver->owner;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	zcrypt_card_put(zc);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return true;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic64_read(&zc->card->total_request_count) <
			atomic64_read(&pref_zc->card->total_request_count);
	return weight < pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return true;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count <
			pref_zq->queue->total_request_count;
	return weight < pref_weight;
}

/*
 * zcrypt ioctls.
 */
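
/*
 * All request paths below use the same selection scheme: iterate over
 * all cards and queues, skip devices that are offline, not configured,
 * checkstopped or not admitted by the caller's ap_perms, and prefer
 * the candidate with the lowest weighted load (see the compare helpers
 * above). For a retried request (tr->again_counter set) the previously
 * used card/queue gets a weight penalty, so a retry tends to pick a
 * different device.
 */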

static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct zcrypt_track *tr,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct zcrypt_track *tr,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms,
			      struct zcrypt_track *tr,
			      struct ica_xcRB *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	unsigned short *domain, tdom;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);

	xcrb->status = 0;

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	tdom = *domain;
	if (perms != &ap_perms && tdom < AP_DOMAINS) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(tdom, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, autoselect target domain.
	 */
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom))
		tdom = AUTOSEL_DOM;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* Check for user selected CCA card */
		if (xcrb->user_defined != AUTOSELECT &&
		    xcrb->user_defined != zc->card->id)
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check for device usable and eligible */
			if (!zq->online || !zq->ops->send_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (tdom != AUTOSEL_DOM &&
			     tdom != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no match for address %02x.%04x => ENODEV\n",
			 xcrb->user_defined, *domain);
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == AUTOSEL_DOM)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);

	while (target_num-- > 0) {
		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
			return true;
		targets++;
	}
	return false;
}
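
/*
 * EP11 CPRBs carry an optional list of target adapter/domain pairs.
 * An empty list means autoselect among all available EP11 targets;
 * AUTOSEL_AP/AUTOSEL_DOM entries act as wildcards for the adapter or
 * domain part. The helpers above match a card id or APQN against this
 * target list during device selection below.
 */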
static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms,
				   struct zcrypt_track *tr,
				   struct ep11_urb *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets = NULL;
	unsigned short target_num;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0, domain;
	struct ap_message ap_msg;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	target_num = (unsigned short)xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	rc = -ENOMEM;
	if (target_num != 0) {
		if (userspace) {
			targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
			if (!targets)
				goto out;
			if (copy_from_user(targets, xcrb->targets,
					   target_num * sizeof(*targets))) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			targets = (struct ep11_target_dev __force __kernel *)xcrb->targets;
		}
	}

	rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	if (perms != &ap_perms && domain < AUTOSEL_DOM) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(domain, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable EP11 card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.ep11)
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->send_ep11_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		if (targets && target_num == 1) {
			pr_debug("no match for address %02x.%04x => ENODEV\n",
				 (int)targets->ap_id, (int)targets->dom_id);
		} else if (targets) {
			pr_debug("no match for %d target addrs => ENODEV\n",
				 (int)target_num);
		} else {
			pr_debug("no match for address ff.ffff => ENODEV\n");
		}
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	if (userspace)
		kfree(targets);
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
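
/*
 * Fetch random bytes via a CPRB sent to a suitable CCA card. On success
 * the return value is the number of random bytes written to the given
 * buffer, otherwise a negative errno. This is the backend for the hwrng
 * device registered further below.
 */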
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;
	rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rng ||
			    !ap_queue_usable(zq->queue))
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
				   int maxcard, int maxqueue)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT);
	maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT);

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			if (card >= maxcard || queue >= maxqueue)
				continue;
			stat = &devstatus[card * maxqueue + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
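
/*
 * Single-APQN variant of the status lookup: scan the zcrypt card and
 * queue lists for an exact card/queue match and fill in one status
 * entry. Returns 0 on success or -ENODEV if no such APQN is known.
 */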
int zcrypt_device_status_ext(int card, int queue,
			     struct zcrypt_device_status_ext *devstat)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;

	memset(devstat, 0, sizeof(*devstat));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (card == AP_QID_CARD(zq->queue->qid) &&
			    queue == AP_QID_QUEUE(zq->queue->qid)) {
				devstat->hwtype = zc->card->ap_dev.device_type;
				devstat->functions = zc->card->hwinfo.fac >> 26;
				devstat->qid = zq->queue->qid;
				devstat->online = zq->online ? 0x01 : 0x00;
				spin_unlock(&zcrypt_list_lock);
				return 0;
			}
		}
	}
	spin_unlock(&zcrypt_list_lock);

	return -ENODEV;
}
EXPORT_SYMBOL(zcrypt_device_status_ext);

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}
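
/*
 * Per-adapter request counters. The queue counters are sampled under
 * the queue lock with bottom halves disabled (the queue locks are also
 * taken from the AP bus's bh processing, presumably the reason for the
 * local_bh_disable() bracket); the 64-bit counts are clamped to u32
 * for the legacy user space interface.
 */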
static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;
	u64 cnt;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}

static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo mex;
	struct ica_rsa_modexpo __user *umex = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex, umex, sizeof(mex)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
		return rc;
	}
	return put_user(mex.outputdatalength, &umex->outputdatalength);
}

static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo_crt crt;
	struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt, ucrt, sizeof(crt)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSACRT rc=%d\n", rc);
		return rc;
	}
	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
}
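
/*
 * The CPRB ioctl paths below follow the same retry idiom as the RSA
 * paths above: repeat on -EAGAIN up to TRACK_AGAIN_MAX times, after
 * -ENODEV trigger one AP bus rescan and run the loop once more, and
 * map a final -EAGAIN to -EIO so a permanently failing request ends
 * up as -EIO rather than -EAGAIN.
 */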

static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ica_xcRB xcrb;
	struct zcrypt_track tr;
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct ica_xcRB __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
			 rc, xcrb.status);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ep11_urb xcrb;
	struct zcrypt_track tr;
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct ep11_urb __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO:
		return icarsamodexpo_ioctl(perms, arg);
	case ICARSACRT:
		return icarsacrt_ioctl(perms, arg);
	case ZSECSENDCPRB:
		return zsecsendcprb_ioctl(perms, arg);
	case ZSENDEP11CPRB:
		return zsendep11cprb_ioctl(perms, arg);
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT,
					 sizeof(struct zcrypt_device_status_ext),
					 GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status,
					      MAX_ZDEV_CARDIDS_EXT,
					      MAX_ZDEV_DOMAINS_EXT);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kvfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *)arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *)arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *)arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *)arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *)arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		pr_debug("unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t b_key;
	compat_uptr_t n_modulus;
};

static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t bp_key;
	compat_uptr_t bq_key;
	compat_uptr_t np_prime;
	compat_uptr_t nq_prime;
	compat_uptr_t u_mult_inv;
};

static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcrb {
	unsigned short agent_ID;
	unsigned int user_defined;
	unsigned short request_ID;
	unsigned int request_control_blk_length;
	unsigned char padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_control_blk_addr;
	unsigned int request_data_length;
	char padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_data_address;
	unsigned int reply_control_blk_length;
	char padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_control_blk_addr;
	unsigned int reply_data_length;
	char padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_data_addr;
	unsigned short priority_window;
	unsigned int status;
} __packed;

static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct compat_ica_xcrb xcrb32;
	struct zcrypt_track tr;
	struct ica_xcRB xcrb64;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
		return -EFAULT;
	xcrb64.agent_ID = xcrb32.agent_ID;
	xcrb64.user_defined = xcrb32.user_defined;
	xcrb64.request_ID = xcrb32.request_ID;
	xcrb64.request_control_blk_length =
		xcrb32.request_control_blk_length;
	xcrb64.request_control_blk_addr =
		compat_ptr(xcrb32.request_control_blk_addr);
	xcrb64.request_data_length =
		xcrb32.request_data_length;
	xcrb64.request_data_address =
		compat_ptr(xcrb32.request_data_address);
	xcrb64.reply_control_blk_length =
		xcrb32.reply_control_blk_length;
	xcrb64.reply_control_blk_addr =
		compat_ptr(xcrb32.reply_control_blk_addr);
	xcrb64.reply_data_length = xcrb32.reply_data_length;
	xcrb64.reply_data_addr =
		compat_ptr(xcrb32.reply_data_addr);
	xcrb64.priority_window = xcrb32.priority_window;
	xcrb64.status = xcrb32.status;
	do {
		rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
	xcrb32.reply_data_length = xcrb64.reply_data_length;
	xcrb32.status = xcrb64.status;
	if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(perms, filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(perms, filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcrb32(perms, filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);
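
/*
 * hwrng backend: zcrypt_rng() fills a whole page with random bytes at
 * once; data_read() then hands out one u32 per call from that buffer
 * and only refills when the buffer index drops to zero.
 */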

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees
	 * serialized read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long)zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
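
/*
 * Note on the hwrng integration above: zcrypt_rng_data_read() refills the
 * page-sized buffer from a crypto card only when the index drops to zero;
 * the return value of zcrypt_rng() (a byte count, judging from the
 * division by sizeof(u32)) determines how many 32-bit words can be handed
 * out before the next refill. Registration is reference counted via
 * zcrypt_rng_device_count, so the hwrng device appears with the first
 * suitable card and disappears with the last.
 */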
2035 */ 2036 int zcrypt_wait_api_operational(void) 2037 { 2038 static DEFINE_MUTEX(zcrypt_wait_api_lock); 2039 static int zcrypt_wait_api_state; 2040 int rc; 2041 2042 rc = mutex_lock_interruptible(&zcrypt_wait_api_lock); 2043 if (rc) 2044 return rc; 2045 2046 switch (zcrypt_wait_api_state) { 2047 case 0: 2048 /* initial state, invoke wait for the ap bus complete */ 2049 rc = ap_wait_apqn_bindings_complete( 2050 msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS)); 2051 switch (rc) { 2052 case 0: 2053 /* ap bus bindings are complete */ 2054 zcrypt_wait_api_state = 1; 2055 break; 2056 case -EINTR: 2057 /* interrupted, go back to caller */ 2058 break; 2059 case -ETIME: 2060 /* timeout */ 2061 ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n", 2062 __func__); 2063 zcrypt_wait_api_state = -ETIME; 2064 break; 2065 default: 2066 /* other failure */ 2067 pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc); 2068 break; 2069 } 2070 break; 2071 case 1: 2072 /* a previous caller already found ap bus bindings complete */ 2073 rc = 0; 2074 break; 2075 default: 2076 /* a previous caller had timeout or other failure */ 2077 rc = zcrypt_wait_api_state; 2078 break; 2079 } 2080 2081 mutex_unlock(&zcrypt_wait_api_lock); 2082 2083 return rc; 2084 } 2085 EXPORT_SYMBOL(zcrypt_wait_api_operational); 2086 2087 int __init zcrypt_debug_init(void) 2088 { 2089 zcrypt_dbf_info = debug_register("zcrypt", 2, 1, 2090 ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long)); 2091 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); 2092 debug_set_level(zcrypt_dbf_info, DBF_ERR); 2093 2094 return 0; 2095 } 2096 2097 void zcrypt_debug_exit(void) 2098 { 2099 debug_unregister(zcrypt_dbf_info); 2100 } 2101 2102 static int __init zcdn_init(void) 2103 { 2104 int rc; 2105 2106 /* create a new class 'zcrypt' */ 2107 rc = class_register(&zcrypt_class); 2108 if (rc) 2109 goto out_class_register_failed; 2110 2111 /* alloc device minor range */ 2112 rc = alloc_chrdev_region(&zcrypt_devt, 2113 0, ZCRYPT_MAX_MINOR_NODES, 2114 ZCRYPT_NAME); 2115 if (rc) 2116 goto out_alloc_chrdev_failed; 2117 2118 cdev_init(&zcrypt_cdev, &zcrypt_fops); 2119 zcrypt_cdev.owner = THIS_MODULE; 2120 rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2121 if (rc) 2122 goto out_cdev_add_failed; 2123 2124 /* need some class specific sysfs attributes */ 2125 rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create); 2126 if (rc) 2127 goto out_class_create_file_1_failed; 2128 rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy); 2129 if (rc) 2130 goto out_class_create_file_2_failed; 2131 2132 return 0; 2133 2134 out_class_create_file_2_failed: 2135 class_remove_file(&zcrypt_class, &class_attr_zcdn_create); 2136 out_class_create_file_1_failed: 2137 cdev_del(&zcrypt_cdev); 2138 out_cdev_add_failed: 2139 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2140 out_alloc_chrdev_failed: 2141 class_unregister(&zcrypt_class); 2142 out_class_register_failed: 2143 return rc; 2144 } 2145 2146 static void zcdn_exit(void) 2147 { 2148 class_remove_file(&zcrypt_class, &class_attr_zcdn_create); 2149 class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy); 2150 zcdn_destroy_all(); 2151 cdev_del(&zcrypt_cdev); 2152 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2153 class_unregister(&zcrypt_class); 2154 } 2155 2156 /* 2157 * zcrypt_api_init(): Module initialization. 2158 * 2159 * The module initialization code. 
2160 */ 2161 int __init zcrypt_api_init(void) 2162 { 2163 int rc; 2164 2165 /* make sure the mempool threshold is >= 1 */ 2166 if (zcrypt_mempool_threshold < 1) { 2167 rc = -EINVAL; 2168 goto out; 2169 } 2170 2171 rc = zcrypt_debug_init(); 2172 if (rc) 2173 goto out; 2174 2175 rc = zcdn_init(); 2176 if (rc) 2177 goto out_zcdn_init_failed; 2178 2179 rc = zcrypt_ccamisc_init(); 2180 if (rc) 2181 goto out_ccamisc_init_failed; 2182 2183 rc = zcrypt_ep11misc_init(); 2184 if (rc) 2185 goto out_ep11misc_init_failed; 2186 2187 /* Register the request sprayer. */ 2188 rc = misc_register(&zcrypt_misc_device); 2189 if (rc < 0) 2190 goto out_misc_register_failed; 2191 2192 zcrypt_msgtype6_init(); 2193 zcrypt_msgtype50_init(); 2194 2195 return 0; 2196 2197 out_misc_register_failed: 2198 zcrypt_ep11misc_exit(); 2199 out_ep11misc_init_failed: 2200 zcrypt_ccamisc_exit(); 2201 out_ccamisc_init_failed: 2202 zcdn_exit(); 2203 out_zcdn_init_failed: 2204 zcrypt_debug_exit(); 2205 out: 2206 return rc; 2207 } 2208 2209 /* 2210 * zcrypt_api_exit(): Module termination. 2211 * 2212 * The module termination code. 2213 */ 2214 void __exit zcrypt_api_exit(void) 2215 { 2216 zcdn_exit(); 2217 misc_deregister(&zcrypt_misc_device); 2218 zcrypt_msgtype6_exit(); 2219 zcrypt_msgtype50_exit(); 2220 zcrypt_ccamisc_exit(); 2221 zcrypt_ep11misc_exit(); 2222 zcrypt_debug_exit(); 2223 } 2224 2225 module_init(zcrypt_api_init); 2226 module_exit(zcrypt_api_exit); 2227