// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright IBM Corp. 2001, 2018
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
 */

#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/*
 * Process a rescan of the transport layer.
 * Runs a synchronous AP bus rescan.
 * Returns true if something has changed (for example the
 * bus scan has found and set up new devices) and a retry
 * is worthwhile. Otherwise false is returned, meaning no
 * changes on the AP bus level.
 */
static inline bool zcrypt_process_rescan(void)
{
	return ap_bus_force_rescan();
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if (zops->variant == variant &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);

/*
 * Multi device nodes extension functions.
 */

struct zcdn_device;

static struct class *zcrypt_class;
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

#define ZCDN_MAX_NAME 32

static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);
/*
 * Find zcdn device by name.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
	struct device *dev = class_find_device_by_name(zcrypt_class, name);

	return dev ? to_zcdn_dev(dev) : NULL;
}

/*
 * Find zcdn device by devt value.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev = class_find_device_by_devt(zcrypt_class, devt);

	return dev ? to_zcdn_dev(dev) : NULL;
}

static ssize_t ioctlmask_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t ioctlmask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
			       AP_IOCTLS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(ioctlmask);

static ssize_t apmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t apmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
			       AP_DEVICES, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(apmask);

static ssize_t aqmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t aqmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(aqmask);
static ssize_t admask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t admask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(admask);

static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	&dev_attr_admask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};
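
/*
 * Illustrative sketch (not part of the driver): the four attributes
 * above accept whatever ap_parse_mask_str() understands, i.e. an
 * absolute hex mask or relative "+idx[-idx]" / "-idx[-idx]" updates,
 * with inverted (msb-first) bit numbering. Assuming a node named
 * "my_zcdn" already exists, a shell session could look like this:
 *
 *   echo "+4"     > /sys/class/zcrypt/my_zcdn/apmask    # admit card 4
 *   echo "+6"     > /sys/class/zcrypt/my_zcdn/aqmask    # admit domain 6
 *   echo "+0-255" > /sys/class/zcrypt/my_zcdn/ioctlmask # allow all ioctls
 *   cat /sys/class/zcrypt/my_zcdn/apmask                # prints full hex mask
 */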
static ssize_t zcdn_create_store(const struct class *class,
				 const struct class_attribute *attr,
				 const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_create(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_create =
	__ATTR(create, 0600, NULL, zcdn_create_store);

static ssize_t zcdn_destroy_store(const struct class *class,
				  const struct class_attribute *attr,
				  const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_destroy(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_destroy =
	__ATTR(destroy, 0600, NULL, zcdn_destroy_store);

static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
			__func__, MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}

static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		rc = dev_set_name(&zcdndev->device, "%s", name);
	else
		rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
	if (rc) {
		kfree(zcdndev);
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
			__func__, MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_perms_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_perms_mutex);
}
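
/*
 * Illustrative sketch (not part of the driver): zcdn nodes are
 * created and removed through the two class attributes defined above:
 *
 *   echo "my_zcdn" > /sys/class/zcrypt/create    # new node, default perms
 *   echo "my_zcdn" > /sys/class/zcrypt/destroy   # unregister it again
 *
 * An empty name makes zcdn_create() fall back to "zcrypt_<minor>".
 * At most ZCRYPT_MAX_MINOR_NODES such nodes can exist concurrently,
 * all backed by the zcrypt_cdev char device registered in zcdn_init().
 */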
/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	struct ap_perms *perms = &ap_perms;

	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_perms_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev)
			perms = &zcdndev->perms;
	}
	filp->private_data = (void *)perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}

/*
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_perms_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}

	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline int zcrypt_check_ioctl(struct ap_perms *perms,
				     unsigned int cmd)
{
	int rc = -EPERM;
	int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;

	if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
		if (test_bit_inv(ioctlnr, perms->ioctlm))
			rc = 0;
	}

	if (rc)
		ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
				__func__, ioctlnr, rc);

	return rc;
}

static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
	return test_bit_inv(card, perms->apm) ? true : false;
}

static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
	return test_bit_inv(queue, perms->aqm) ? true : false;
}
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	*pmod = zq->queue->ap_dev.device.driver->owner;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return true;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic64_read(&zc->card->total_request_count) <
			atomic64_read(&pref_zc->card->total_request_count);
	return weight < pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return true;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count <
			pref_zq->queue->total_request_count;
	return weight < pref_weight;
}
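
/*
 * Worked example for the weight comparison above (illustrative
 * numbers only): with two candidate cards of equal
 * speed_rating[func_code] = 10, card A under load 50 and card B under
 * load 30, the compare is 10 + 50 = 60 vs 10 + 30 = 40, so card B
 * wins; a tie falls back to the smaller total_request_count. The
 * TRACK_AGAIN_* penalties are added on retries so that a card or
 * queue which already failed this request is only chosen again if it
 * is still clearly the best candidate.
 */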
/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct zcrypt_track *tr,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	ap_init_message(&ap_msg);

	if (mex->outputdatalength < mex->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("%s no matching queue found => ENODEV\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct zcrypt_track *tr,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	ap_init_message(&ap_msg);

	if (crt->outputdatalength < crt->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("%s no matching queue found => ENODEV\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
			      struct zcrypt_track *tr,
			      struct ica_xcRB *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	unsigned short *domain, tdom;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);

	xcrb->status = 0;
	ap_init_message(&ap_msg);

	rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	tdom = *domain;
	if (perms != &ap_perms && tdom < AP_DOMAINS) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(tdom, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, autoselect target domain.
	 */
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom))
		tdom = AUTOSEL_DOM;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* Check for user selected CCA card */
		if (xcrb->user_defined != AUTOSELECT &&
		    xcrb->user_defined != zc->card->id)
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check for device usable and eligible */
			if (!zq->online || !zq->ops->send_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (tdom != AUTOSEL_DOM &&
			     tdom != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("%s no match for address %02x.%04x => ENODEV\n",
			 __func__, xcrb->user_defined, *domain);
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == AUTOSEL_DOM)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
long zcrypt_send_cprb(struct ica_xcRB *xcrb)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("%s rc=%d\n", __func__, rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
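
/*
 * Note on the retry pattern used by zcrypt_send_cprb() and the other
 * entry points in this file: -EAGAIN from the lower level means
 * "worth trying another device", so the request is resubmitted up to
 * TRACK_AGAIN_MAX times while struct zcrypt_track remembers the last
 * qid/rc and penalizes the failing device in the next selection
 * round. On -ENODEV one synchronous AP bus rescan is requested; if
 * the rescan changed anything, the whole retry loop runs once more.
 * Exhausted retries are reported to the caller as -EIO.
 */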
static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);

	while (target_num-- > 0) {
		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
			return true;
		targets++;
	}
	return false;
}

static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
				   struct zcrypt_track *tr,
				   struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code, domain;
	struct ap_message ap_msg;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short)xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			func_code = 0;
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *)xcrb->targets;
		if (z_copy_from_user(userspace, targets, uptr,
				     target_num * sizeof(*targets))) {
			func_code = 0;
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out_free;
	print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	if (perms != &ap_perms && domain < AUTOSEL_DOM) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(domain, perms->adm)) {
				rc = -ENODEV;
				goto out_free;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out_free;
		}
	}

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable EP11 card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.ep11)
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->send_ep11_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		if (targets && target_num == 1) {
			pr_debug("%s no match for address %02x.%04x => ENODEV\n",
				 __func__, (int)targets->ap_id,
				 (int)targets->dom_id);
		} else if (targets) {
			pr_debug("%s no match for %d target addrs => ENODEV\n",
				 __func__, (int)target_num);
		} else {
			pr_debug("%s no match for address ff.ffff => ENODEV\n",
				 __func__);
		}
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("%s rc=%d\n", __func__, rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rng ||
			    !ap_queue_usable(zq->queue))
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("%s no matching queue found => ENODEV\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);

int zcrypt_device_status_ext(int card, int queue,
			     struct zcrypt_device_status_ext *devstat)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;

	memset(devstat, 0, sizeof(*devstat));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (card == AP_QID_CARD(zq->queue->qid) &&
			    queue == AP_QID_QUEUE(zq->queue->qid)) {
				devstat->hwtype = zc->card->ap_dev.device_type;
				devstat->functions = zc->card->hwinfo.fac >> 26;
				devstat->qid = zq->queue->qid;
				devstat->online = zq->online ? 0x01 : 0x00;
				spin_unlock(&zcrypt_list_lock);
				return 0;
			}
		}
	}
	spin_unlock(&zcrypt_list_lock);

	return -ENODEV;
}
EXPORT_SYMBOL(zcrypt_device_status_ext);
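
/*
 * Illustrative note: the devstatus arrays filled above form a flat
 * card x domain matrix, indexed as devstatus[card * AP_DOMAINS + queue].
 * With AP_DOMAINS = 256, card 2 / domain 5 for example lands in slot
 * 2 * 256 + 5 = 517, which is why the ext variant needs
 * MAX_ZDEV_ENTRIES_EXT entries while the non-ext variant only covers
 * MAX_ZDEV_CARDIDS cards and skips anything beyond that bound.
 */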
static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;
	u64 cnt;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}
static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}

static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo mex;
	struct ica_rsa_modexpo __user *umex = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex, umex, sizeof(mex)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
		return rc;
	}
	return put_user(mex.outputdatalength, &umex->outputdatalength);
}

static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo_crt crt;
	struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt, ucrt, sizeof(crt)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSACRT rc=%d\n", rc);
		return rc;
	}
	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
}
static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ica_xcRB xcrb;
	struct zcrypt_track tr;
	struct ica_xcRB __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
			 rc, xcrb.status);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ep11_urb xcrb;
	struct zcrypt_track tr;
	struct ep11_urb __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO:
		return icarsamodexpo_ioctl(perms, arg);
	case ICARSACRT:
		return icarsacrt_ioctl(perms, arg);
	case ZSECSENDCPRB:
		return zsecsendcprb_ioctl(perms, arg);
	case ZSENDEP11CPRB:
		return zsendep11cprb_ioctl(perms, arg);
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
					       sizeof(struct zcrypt_device_status_ext),
					       GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kvfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *)arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *)arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *)arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *)arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *)arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		pr_debug("unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}
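
/*
 * Illustrative userspace sketch (not part of the driver), assuming
 * the uapi definitions from <asm/zcrypt.h>:
 *
 *   int fd = open("/dev/z90crypt", O_RDWR);
 *   struct ica_rsa_modexpo mex = { ... set up by the caller ... };
 *
 *   if (fd >= 0 && ioctl(fd, ICARSAMODEXPO, &mex) == 0)
 *           ; // mex.outputdata now holds outputdatalength result bytes
 *
 * The same file descriptor (or a zcdn node) accepts ZSECSENDCPRB,
 * ZSENDEP11CPRB and the status ioctls handled in the switch above.
 */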
#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}
struct compat_ica_xcrb {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __packed;

static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
	struct compat_ica_xcrb xcrb32;
	struct zcrypt_track tr;
	struct ica_xcRB xcrb64;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
		return -EFAULT;
	xcrb64.agent_ID = xcrb32.agent_ID;
	xcrb64.user_defined = xcrb32.user_defined;
	xcrb64.request_ID = xcrb32.request_ID;
	xcrb64.request_control_blk_length =
		xcrb32.request_control_blk_length;
	xcrb64.request_control_blk_addr =
		compat_ptr(xcrb32.request_control_blk_addr);
	xcrb64.request_data_length =
		xcrb32.request_data_length;
	xcrb64.request_data_address =
		compat_ptr(xcrb32.request_data_address);
	xcrb64.reply_control_blk_length =
		xcrb32.reply_control_blk_length;
	xcrb64.reply_control_blk_addr =
		compat_ptr(xcrb32.reply_control_blk_addr);
	xcrb64.reply_data_length = xcrb32.reply_data_length;
	xcrb64.reply_data_addr =
		compat_ptr(xcrb32.reply_data_addr);
	xcrb64.priority_window = xcrb32.priority_window;
	xcrb64.status = xcrb32.status;
	do {
		rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
	xcrb32.reply_data_length = xcrb64.reply_data_length;
	xcrb32.status = xcrb64.status;
	if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(perms, filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(perms, filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcrb32(perms, filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long)zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
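
/*
 * Illustrative note: zcrypt_rng_buffer is a single zeroed page, i.e.
 * 4096 bytes or 1024 u32 words on s390. zcrypt_rng() refills it with
 * a random number CPRB sent to a CCA card and returns the number of
 * bytes obtained; zcrypt_rng_data_read() then hands the words out one
 * at a time until zcrypt_rng_buffer_index drops back to zero.
 */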
/*
 * Wait until the zcrypt API is operational.
 * The AP bus scan and the binding of AP devices to device drivers are
 * asynchronous jobs. This function waits until these initial jobs are
 * done, so the zcrypt API should be ready to serve crypto requests if
 * there are resources available. The function uses an internal timeout
 * of 30s. The very first caller either waits until the AP bus bindings
 * are complete or until the timeout happens. This state is remembered
 * for further callers, which are only blocked until a decision is made
 * (timeout or bindings complete).
 * On timeout -ETIME is returned; on success the return value is 0.
 */
int zcrypt_wait_api_operational(void)
{
	static DEFINE_MUTEX(zcrypt_wait_api_lock);
	static int zcrypt_wait_api_state;
	int rc;

	rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
	if (rc)
		return rc;

	switch (zcrypt_wait_api_state) {
	case 0:
		/* initial state, invoke wait for the ap bus complete */
		rc = ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
		switch (rc) {
		case 0:
			/* ap bus bindings are complete */
			zcrypt_wait_api_state = 1;
			break;
		case -EINTR:
			/* interrupted, go back to caller */
			break;
		case -ETIME:
			/* timeout */
			ZCRYPT_DBF_WARN("%s ap_wait_apqn_bindings_complete()=ETIME\n",
					__func__);
			zcrypt_wait_api_state = -ETIME;
			break;
		default:
			/* other failure */
			pr_debug("%s ap_wait_apqn_bindings_complete()=%d\n",
				 __func__, rc);
			break;
		}
		break;
	case 1:
		/* a previous caller already found ap bus bindings complete */
		rc = 0;
		break;
	default:
		/* a previous caller had timeout or other failure */
		rc = zcrypt_wait_api_state;
		break;
	}

	mutex_unlock(&zcrypt_wait_api_lock);

	return rc;
}
EXPORT_SYMBOL(zcrypt_wait_api_operational);

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
					 ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

static int __init zcdn_init(void)
{
	int rc;

	/* create a new class 'zcrypt' */
	zcrypt_class = class_create(ZCRYPT_NAME);
	if (IS_ERR(zcrypt_class)) {
		rc = PTR_ERR(zcrypt_class);
		goto out_class_create_failed;
	}
	zcrypt_class->dev_release = zcdn_device_release;

	/* alloc device minor range */
	rc = alloc_chrdev_region(&zcrypt_devt,
				 0, ZCRYPT_MAX_MINOR_NODES,
				 ZCRYPT_NAME);
	if (rc)
		goto out_alloc_chrdev_failed;

	cdev_init(&zcrypt_cdev, &zcrypt_fops);
	zcrypt_cdev.owner = THIS_MODULE;
	rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	if (rc)
		goto out_cdev_add_failed;

	/* need some class specific sysfs attributes */
	rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
	if (rc)
		goto out_class_create_file_1_failed;
	rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
	if (rc)
		goto out_class_create_file_2_failed;

	return 0;

out_class_create_file_2_failed:
	class_remove_file(zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
	cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
	class_destroy(zcrypt_class);
out_class_create_failed:
	return rc;
}

static void zcdn_exit(void)
{
	class_remove_file(zcrypt_class, &class_attr_zcdn_create);
	class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
	zcdn_destroy_all();
	cdev_del(&zcrypt_cdev);
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	class_destroy(zcrypt_class);
}
/*
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	rc = zcdn_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_misc_register_failed;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();

	return 0;

out_misc_register_failed:
	zcdn_exit();
	zcrypt_debug_exit();
out:
	return rc;
}

/*
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	zcdn_exit();
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_ccamisc_exit();
	zcrypt_ep11misc_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);