1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright IBM Corp. 2001, 2018 4 * Author(s): Robert Burroughs 5 * Eric Rossman (edrossma@us.ibm.com) 6 * Cornelia Huck <cornelia.huck@de.ibm.com> 7 * 8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Ralph Wuerthner <rwuerthn@de.ibm.com> 11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 12 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com> 13 */ 14 15 #define KMSG_COMPONENT "zcrypt" 16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 17 18 #include <linux/module.h> 19 #include <linux/init.h> 20 #include <linux/interrupt.h> 21 #include <linux/miscdevice.h> 22 #include <linux/fs.h> 23 #include <linux/compat.h> 24 #include <linux/slab.h> 25 #include <linux/atomic.h> 26 #include <linux/uaccess.h> 27 #include <linux/hw_random.h> 28 #include <linux/debugfs.h> 29 #include <linux/cdev.h> 30 #include <linux/ctype.h> 31 #include <linux/capability.h> 32 #include <asm/debug.h> 33 34 #define CREATE_TRACE_POINTS 35 #include <asm/trace/zcrypt.h> 36 37 #include "zcrypt_api.h" 38 #include "zcrypt_debug.h" 39 40 #include "zcrypt_msgtype6.h" 41 #include "zcrypt_msgtype50.h" 42 #include "zcrypt_ccamisc.h" 43 #include "zcrypt_ep11misc.h" 44 45 /* 46 * Module description. 47 */ 48 MODULE_AUTHOR("IBM Corporation"); 49 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ 50 "Copyright IBM Corp. 2001, 2012"); 51 MODULE_LICENSE("GPL"); 52 53 /* 54 * zcrypt tracepoint functions 55 */ 56 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); 57 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); 58 59 DEFINE_SPINLOCK(zcrypt_list_lock); 60 LIST_HEAD(zcrypt_card_list); 61 62 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 63 64 static LIST_HEAD(zcrypt_ops_list); 65 66 /* Zcrypt related debug feature stuff. */ 67 debug_info_t *zcrypt_dbf_info; 68 69 /* 70 * Process a rescan of the transport layer. 
71 * Runs a synchronous AP bus rescan. 72 * Returns true if something has changed (for example the 73 * bus scan has found and build up new devices) and it is 74 * worth to do a retry. Otherwise false is returned meaning 75 * no changes on the AP bus level. 76 */ 77 static inline bool zcrypt_process_rescan(void) 78 { 79 return ap_bus_force_rescan(); 80 } 81 82 void zcrypt_msgtype_register(struct zcrypt_ops *zops) 83 { 84 list_add_tail(&zops->list, &zcrypt_ops_list); 85 } 86 87 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) 88 { 89 list_del_init(&zops->list); 90 } 91 92 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) 93 { 94 struct zcrypt_ops *zops; 95 96 list_for_each_entry(zops, &zcrypt_ops_list, list) 97 if (zops->variant == variant && 98 (!strncmp(zops->name, name, sizeof(zops->name)))) 99 return zops; 100 return NULL; 101 } 102 EXPORT_SYMBOL(zcrypt_msgtype); 103 104 /* 105 * Multi device nodes extension functions. 106 */ 107 108 struct zcdn_device; 109 110 static struct class *zcrypt_class; 111 static dev_t zcrypt_devt; 112 static struct cdev zcrypt_cdev; 113 114 struct zcdn_device { 115 struct device device; 116 struct ap_perms perms; 117 }; 118 119 #define to_zcdn_dev(x) container_of((x), struct zcdn_device, device) 120 121 #define ZCDN_MAX_NAME 32 122 123 static int zcdn_create(const char *name); 124 static int zcdn_destroy(const char *name); 125 126 /* 127 * Find zcdn device by name. 128 * Returns reference to the zcdn device which needs to be released 129 * with put_device() after use. 130 */ 131 static inline struct zcdn_device *find_zcdndev_by_name(const char *name) 132 { 133 struct device *dev = class_find_device_by_name(zcrypt_class, name); 134 135 return dev ? to_zcdn_dev(dev) : NULL; 136 } 137 138 /* 139 * Find zcdn device by devt value. 140 * Returns reference to the zcdn device which needs to be released 141 * with put_device() after use. 
142 */ 143 static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt) 144 { 145 struct device *dev = class_find_device_by_devt(zcrypt_class, devt); 146 147 return dev ? to_zcdn_dev(dev) : NULL; 148 } 149 150 static ssize_t ioctlmask_show(struct device *dev, 151 struct device_attribute *attr, 152 char *buf) 153 { 154 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 155 int i, n; 156 157 if (mutex_lock_interruptible(&ap_perms_mutex)) 158 return -ERESTARTSYS; 159 160 n = sysfs_emit(buf, "0x"); 161 for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++) 162 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]); 163 n += sysfs_emit_at(buf, n, "\n"); 164 165 mutex_unlock(&ap_perms_mutex); 166 167 return n; 168 } 169 170 static ssize_t ioctlmask_store(struct device *dev, 171 struct device_attribute *attr, 172 const char *buf, size_t count) 173 { 174 int rc; 175 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 176 177 rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm, 178 AP_IOCTLS, &ap_perms_mutex); 179 if (rc) 180 return rc; 181 182 return count; 183 } 184 185 static DEVICE_ATTR_RW(ioctlmask); 186 187 static ssize_t apmask_show(struct device *dev, 188 struct device_attribute *attr, 189 char *buf) 190 { 191 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 192 int i, n; 193 194 if (mutex_lock_interruptible(&ap_perms_mutex)) 195 return -ERESTARTSYS; 196 197 n = sysfs_emit(buf, "0x"); 198 for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++) 199 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]); 200 n += sysfs_emit_at(buf, n, "\n"); 201 202 mutex_unlock(&ap_perms_mutex); 203 204 return n; 205 } 206 207 static ssize_t apmask_store(struct device *dev, 208 struct device_attribute *attr, 209 const char *buf, size_t count) 210 { 211 int rc; 212 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 213 214 rc = ap_parse_mask_str(buf, zcdndev->perms.apm, 215 AP_DEVICES, &ap_perms_mutex); 216 if (rc) 217 return rc; 218 219 return count; 220 } 
221 222 static DEVICE_ATTR_RW(apmask); 223 224 static ssize_t aqmask_show(struct device *dev, 225 struct device_attribute *attr, 226 char *buf) 227 { 228 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 229 int i, n; 230 231 if (mutex_lock_interruptible(&ap_perms_mutex)) 232 return -ERESTARTSYS; 233 234 n = sysfs_emit(buf, "0x"); 235 for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++) 236 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]); 237 n += sysfs_emit_at(buf, n, "\n"); 238 239 mutex_unlock(&ap_perms_mutex); 240 241 return n; 242 } 243 244 static ssize_t aqmask_store(struct device *dev, 245 struct device_attribute *attr, 246 const char *buf, size_t count) 247 { 248 int rc; 249 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 250 251 rc = ap_parse_mask_str(buf, zcdndev->perms.aqm, 252 AP_DOMAINS, &ap_perms_mutex); 253 if (rc) 254 return rc; 255 256 return count; 257 } 258 259 static DEVICE_ATTR_RW(aqmask); 260 261 static ssize_t admask_show(struct device *dev, 262 struct device_attribute *attr, 263 char *buf) 264 { 265 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 266 int i, n; 267 268 if (mutex_lock_interruptible(&ap_perms_mutex)) 269 return -ERESTARTSYS; 270 271 n = sysfs_emit(buf, "0x"); 272 for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++) 273 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]); 274 n += sysfs_emit_at(buf, n, "\n"); 275 276 mutex_unlock(&ap_perms_mutex); 277 278 return n; 279 } 280 281 static ssize_t admask_store(struct device *dev, 282 struct device_attribute *attr, 283 const char *buf, size_t count) 284 { 285 int rc; 286 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 287 288 rc = ap_parse_mask_str(buf, zcdndev->perms.adm, 289 AP_DOMAINS, &ap_perms_mutex); 290 if (rc) 291 return rc; 292 293 return count; 294 } 295 296 static DEVICE_ATTR_RW(admask); 297 298 static struct attribute *zcdn_dev_attrs[] = { 299 &dev_attr_ioctlmask.attr, 300 &dev_attr_apmask.attr, 301 &dev_attr_aqmask.attr, 302 
&dev_attr_admask.attr, 303 NULL 304 }; 305 306 static struct attribute_group zcdn_dev_attr_group = { 307 .attrs = zcdn_dev_attrs 308 }; 309 310 static const struct attribute_group *zcdn_dev_attr_groups[] = { 311 &zcdn_dev_attr_group, 312 NULL 313 }; 314 315 static ssize_t zcdn_create_store(const struct class *class, 316 const struct class_attribute *attr, 317 const char *buf, size_t count) 318 { 319 int rc; 320 char name[ZCDN_MAX_NAME]; 321 322 strscpy(name, skip_spaces(buf), sizeof(name)); 323 324 rc = zcdn_create(strim(name)); 325 326 return rc ? rc : count; 327 } 328 329 static const struct class_attribute class_attr_zcdn_create = 330 __ATTR(create, 0600, NULL, zcdn_create_store); 331 332 static ssize_t zcdn_destroy_store(const struct class *class, 333 const struct class_attribute *attr, 334 const char *buf, size_t count) 335 { 336 int rc; 337 char name[ZCDN_MAX_NAME]; 338 339 strscpy(name, skip_spaces(buf), sizeof(name)); 340 341 rc = zcdn_destroy(strim(name)); 342 343 return rc ? rc : count; 344 } 345 346 static const struct class_attribute class_attr_zcdn_destroy = 347 __ATTR(destroy, 0600, NULL, zcdn_destroy_store); 348 349 static void zcdn_device_release(struct device *dev) 350 { 351 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 352 353 ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n", 354 __func__, MAJOR(dev->devt), MINOR(dev->devt)); 355 356 kfree(zcdndev); 357 } 358 359 static int zcdn_create(const char *name) 360 { 361 dev_t devt; 362 int i, rc = 0; 363 struct zcdn_device *zcdndev; 364 365 if (mutex_lock_interruptible(&ap_perms_mutex)) 366 return -ERESTARTSYS; 367 368 /* check if device node with this name already exists */ 369 if (name[0]) { 370 zcdndev = find_zcdndev_by_name(name); 371 if (zcdndev) { 372 put_device(&zcdndev->device); 373 rc = -EEXIST; 374 goto unlockout; 375 } 376 } 377 378 /* find an unused minor number */ 379 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { 380 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); 381 zcdndev 
= find_zcdndev_by_devt(devt); 382 if (zcdndev) 383 put_device(&zcdndev->device); 384 else 385 break; 386 } 387 if (i == ZCRYPT_MAX_MINOR_NODES) { 388 rc = -ENOSPC; 389 goto unlockout; 390 } 391 392 /* alloc and prepare a new zcdn device */ 393 zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL); 394 if (!zcdndev) { 395 rc = -ENOMEM; 396 goto unlockout; 397 } 398 zcdndev->device.release = zcdn_device_release; 399 zcdndev->device.class = zcrypt_class; 400 zcdndev->device.devt = devt; 401 zcdndev->device.groups = zcdn_dev_attr_groups; 402 if (name[0]) 403 rc = dev_set_name(&zcdndev->device, "%s", name); 404 else 405 rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt)); 406 if (rc) { 407 kfree(zcdndev); 408 goto unlockout; 409 } 410 rc = device_register(&zcdndev->device); 411 if (rc) { 412 put_device(&zcdndev->device); 413 goto unlockout; 414 } 415 416 ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n", 417 __func__, MAJOR(devt), MINOR(devt)); 418 419 unlockout: 420 mutex_unlock(&ap_perms_mutex); 421 return rc; 422 } 423 424 static int zcdn_destroy(const char *name) 425 { 426 int rc = 0; 427 struct zcdn_device *zcdndev; 428 429 if (mutex_lock_interruptible(&ap_perms_mutex)) 430 return -ERESTARTSYS; 431 432 /* try to find this zcdn device */ 433 zcdndev = find_zcdndev_by_name(name); 434 if (!zcdndev) { 435 rc = -ENOENT; 436 goto unlockout; 437 } 438 439 /* 440 * The zcdn device is not hard destroyed. It is subject to 441 * reference counting and thus just needs to be unregistered. 
442 */ 443 put_device(&zcdndev->device); 444 device_unregister(&zcdndev->device); 445 446 unlockout: 447 mutex_unlock(&ap_perms_mutex); 448 return rc; 449 } 450 451 static void zcdn_destroy_all(void) 452 { 453 int i; 454 dev_t devt; 455 struct zcdn_device *zcdndev; 456 457 mutex_lock(&ap_perms_mutex); 458 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { 459 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); 460 zcdndev = find_zcdndev_by_devt(devt); 461 if (zcdndev) { 462 put_device(&zcdndev->device); 463 device_unregister(&zcdndev->device); 464 } 465 } 466 mutex_unlock(&ap_perms_mutex); 467 } 468 469 /* 470 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 471 * 472 * This function is not supported beyond zcrypt 1.3.1. 473 */ 474 static ssize_t zcrypt_read(struct file *filp, char __user *buf, 475 size_t count, loff_t *f_pos) 476 { 477 return -EPERM; 478 } 479 480 /* 481 * zcrypt_write(): Not allowed. 482 * 483 * Write is not allowed 484 */ 485 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 486 size_t count, loff_t *f_pos) 487 { 488 return -EPERM; 489 } 490 491 /* 492 * zcrypt_open(): Count number of users. 493 * 494 * Device open function to count number of users. 495 */ 496 static int zcrypt_open(struct inode *inode, struct file *filp) 497 { 498 struct ap_perms *perms = &ap_perms; 499 500 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 501 struct zcdn_device *zcdndev; 502 503 if (mutex_lock_interruptible(&ap_perms_mutex)) 504 return -ERESTARTSYS; 505 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 506 /* find returns a reference, no get_device() needed */ 507 mutex_unlock(&ap_perms_mutex); 508 if (zcdndev) 509 perms = &zcdndev->perms; 510 } 511 filp->private_data = (void *)perms; 512 513 atomic_inc(&zcrypt_open_count); 514 return stream_open(inode, filp); 515 } 516 517 /* 518 * zcrypt_release(): Count number of users. 519 * 520 * Device close function to count number of users. 
521 */ 522 static int zcrypt_release(struct inode *inode, struct file *filp) 523 { 524 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 525 struct zcdn_device *zcdndev; 526 527 mutex_lock(&ap_perms_mutex); 528 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 529 mutex_unlock(&ap_perms_mutex); 530 if (zcdndev) { 531 /* 2 puts here: one for find, one for open */ 532 put_device(&zcdndev->device); 533 put_device(&zcdndev->device); 534 } 535 } 536 537 atomic_dec(&zcrypt_open_count); 538 return 0; 539 } 540 541 static inline int zcrypt_check_ioctl(struct ap_perms *perms, 542 unsigned int cmd) 543 { 544 int rc = -EPERM; 545 int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT; 546 547 if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) { 548 if (test_bit_inv(ioctlnr, perms->ioctlm)) 549 rc = 0; 550 } 551 552 if (rc) 553 ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n", 554 __func__, ioctlnr, rc); 555 556 return rc; 557 } 558 559 static inline bool zcrypt_check_card(struct ap_perms *perms, int card) 560 { 561 return test_bit_inv(card, perms->apm) ? true : false; 562 } 563 564 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue) 565 { 566 return test_bit_inv(queue, perms->aqm) ? 
true : false; 567 } 568 569 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 570 struct zcrypt_queue *zq, 571 struct module **pmod, 572 unsigned int weight) 573 { 574 if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner)) 575 return NULL; 576 zcrypt_card_get(zc); 577 zcrypt_queue_get(zq); 578 get_device(&zq->queue->ap_dev.device); 579 atomic_add(weight, &zc->load); 580 atomic_add(weight, &zq->load); 581 zq->request_count++; 582 *pmod = zq->queue->ap_dev.device.driver->owner; 583 return zq; 584 } 585 586 static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 587 struct zcrypt_queue *zq, 588 struct module *mod, 589 unsigned int weight) 590 { 591 zq->request_count--; 592 atomic_sub(weight, &zc->load); 593 atomic_sub(weight, &zq->load); 594 put_device(&zq->queue->ap_dev.device); 595 zcrypt_queue_put(zq); 596 zcrypt_card_put(zc); 597 module_put(mod); 598 } 599 600 static inline bool zcrypt_card_compare(struct zcrypt_card *zc, 601 struct zcrypt_card *pref_zc, 602 unsigned int weight, 603 unsigned int pref_weight) 604 { 605 if (!pref_zc) 606 return true; 607 weight += atomic_read(&zc->load); 608 pref_weight += atomic_read(&pref_zc->load); 609 if (weight == pref_weight) 610 return atomic64_read(&zc->card->total_request_count) < 611 atomic64_read(&pref_zc->card->total_request_count); 612 return weight < pref_weight; 613 } 614 615 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, 616 struct zcrypt_queue *pref_zq, 617 unsigned int weight, 618 unsigned int pref_weight) 619 { 620 if (!pref_zq) 621 return true; 622 weight += atomic_read(&zq->load); 623 pref_weight += atomic_read(&pref_zq->load); 624 if (weight == pref_weight) 625 return zq->queue->total_request_count < 626 pref_zq->queue->total_request_count; 627 return weight < pref_weight; 628 } 629 630 /* 631 * zcrypt ioctls. 
632 */ 633 static long zcrypt_rsa_modexpo(struct ap_perms *perms, 634 struct zcrypt_track *tr, 635 struct ica_rsa_modexpo *mex) 636 { 637 struct zcrypt_card *zc, *pref_zc; 638 struct zcrypt_queue *zq, *pref_zq; 639 struct ap_message ap_msg; 640 unsigned int wgt = 0, pref_wgt = 0; 641 unsigned int func_code; 642 int cpen, qpen, qid = 0, rc = -ENODEV; 643 struct module *mod; 644 645 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 646 647 ap_init_message(&ap_msg); 648 649 if (mex->outputdatalength < mex->inputdatalength) { 650 func_code = 0; 651 rc = -EINVAL; 652 goto out; 653 } 654 655 /* 656 * As long as outputdatalength is big enough, we can set the 657 * outputdatalength equal to the inputdatalength, since that is the 658 * number of bytes we will copy in any case 659 */ 660 mex->outputdatalength = mex->inputdatalength; 661 662 rc = get_rsa_modex_fc(mex, &func_code); 663 if (rc) 664 goto out; 665 666 pref_zc = NULL; 667 pref_zq = NULL; 668 spin_lock(&zcrypt_list_lock); 669 for_each_zcrypt_card(zc) { 670 /* Check for usable accelerator or CCA card */ 671 if (!zc->online || !zc->card->config || zc->card->chkstop || 672 !(zc->card->hwinfo.accel || zc->card->hwinfo.cca)) 673 continue; 674 /* Check for size limits */ 675 if (zc->min_mod_size > mex->inputdatalength || 676 zc->max_mod_size < mex->inputdatalength) 677 continue; 678 /* check if device node has admission for this card */ 679 if (!zcrypt_check_card(perms, zc->card->id)) 680 continue; 681 /* get weight index of the card device */ 682 wgt = zc->speed_rating[func_code]; 683 /* penalty if this msg was previously sent via this card */ 684 cpen = (tr && tr->again_counter && tr->last_qid && 685 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 
686 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 687 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 688 continue; 689 for_each_zcrypt_queue(zq, zc) { 690 /* check if device is usable and eligible */ 691 if (!zq->online || !zq->ops->rsa_modexpo || 692 !ap_queue_usable(zq->queue)) 693 continue; 694 /* check if device node has admission for this queue */ 695 if (!zcrypt_check_queue(perms, 696 AP_QID_QUEUE(zq->queue->qid))) 697 continue; 698 /* penalty if the msg was previously sent at this qid */ 699 qpen = (tr && tr->again_counter && tr->last_qid && 700 tr->last_qid == zq->queue->qid) ? 701 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 702 if (!zcrypt_queue_compare(zq, pref_zq, 703 wgt + cpen + qpen, pref_wgt)) 704 continue; 705 pref_zc = zc; 706 pref_zq = zq; 707 pref_wgt = wgt + cpen + qpen; 708 } 709 } 710 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 711 spin_unlock(&zcrypt_list_lock); 712 713 if (!pref_zq) { 714 pr_debug("%s no matching queue found => ENODEV\n", __func__); 715 rc = -ENODEV; 716 goto out; 717 } 718 719 qid = pref_zq->queue->qid; 720 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg); 721 722 spin_lock(&zcrypt_list_lock); 723 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 724 spin_unlock(&zcrypt_list_lock); 725 726 out: 727 ap_release_message(&ap_msg); 728 if (tr) { 729 tr->last_rc = rc; 730 tr->last_qid = qid; 731 } 732 trace_s390_zcrypt_rep(mex, func_code, rc, 733 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 734 return rc; 735 } 736 737 static long zcrypt_rsa_crt(struct ap_perms *perms, 738 struct zcrypt_track *tr, 739 struct ica_rsa_modexpo_crt *crt) 740 { 741 struct zcrypt_card *zc, *pref_zc; 742 struct zcrypt_queue *zq, *pref_zq; 743 struct ap_message ap_msg; 744 unsigned int wgt = 0, pref_wgt = 0; 745 unsigned int func_code; 746 int cpen, qpen, qid = 0, rc = -ENODEV; 747 struct module *mod; 748 749 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 750 751 ap_init_message(&ap_msg); 752 753 if (crt->outputdatalength < crt->inputdatalength) { 754 
func_code = 0; 755 rc = -EINVAL; 756 goto out; 757 } 758 759 /* 760 * As long as outputdatalength is big enough, we can set the 761 * outputdatalength equal to the inputdatalength, since that is the 762 * number of bytes we will copy in any case 763 */ 764 crt->outputdatalength = crt->inputdatalength; 765 766 rc = get_rsa_crt_fc(crt, &func_code); 767 if (rc) 768 goto out; 769 770 pref_zc = NULL; 771 pref_zq = NULL; 772 spin_lock(&zcrypt_list_lock); 773 for_each_zcrypt_card(zc) { 774 /* Check for usable accelerator or CCA card */ 775 if (!zc->online || !zc->card->config || zc->card->chkstop || 776 !(zc->card->hwinfo.accel || zc->card->hwinfo.cca)) 777 continue; 778 /* Check for size limits */ 779 if (zc->min_mod_size > crt->inputdatalength || 780 zc->max_mod_size < crt->inputdatalength) 781 continue; 782 /* check if device node has admission for this card */ 783 if (!zcrypt_check_card(perms, zc->card->id)) 784 continue; 785 /* get weight index of the card device */ 786 wgt = zc->speed_rating[func_code]; 787 /* penalty if this msg was previously sent via this card */ 788 cpen = (tr && tr->again_counter && tr->last_qid && 789 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 790 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 791 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 792 continue; 793 for_each_zcrypt_queue(zq, zc) { 794 /* check if device is usable and eligible */ 795 if (!zq->online || !zq->ops->rsa_modexpo_crt || 796 !ap_queue_usable(zq->queue)) 797 continue; 798 /* check if device node has admission for this queue */ 799 if (!zcrypt_check_queue(perms, 800 AP_QID_QUEUE(zq->queue->qid))) 801 continue; 802 /* penalty if the msg was previously sent at this qid */ 803 qpen = (tr && tr->again_counter && tr->last_qid && 804 tr->last_qid == zq->queue->qid) ? 
805 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 806 if (!zcrypt_queue_compare(zq, pref_zq, 807 wgt + cpen + qpen, pref_wgt)) 808 continue; 809 pref_zc = zc; 810 pref_zq = zq; 811 pref_wgt = wgt + cpen + qpen; 812 } 813 } 814 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 815 spin_unlock(&zcrypt_list_lock); 816 817 if (!pref_zq) { 818 pr_debug("%s no matching queue found => ENODEV\n", __func__); 819 rc = -ENODEV; 820 goto out; 821 } 822 823 qid = pref_zq->queue->qid; 824 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg); 825 826 spin_lock(&zcrypt_list_lock); 827 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 828 spin_unlock(&zcrypt_list_lock); 829 830 out: 831 ap_release_message(&ap_msg); 832 if (tr) { 833 tr->last_rc = rc; 834 tr->last_qid = qid; 835 } 836 trace_s390_zcrypt_rep(crt, func_code, rc, 837 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 838 return rc; 839 } 840 841 static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, 842 struct zcrypt_track *tr, 843 struct ica_xcRB *xcrb) 844 { 845 struct zcrypt_card *zc, *pref_zc; 846 struct zcrypt_queue *zq, *pref_zq; 847 struct ap_message ap_msg; 848 unsigned int wgt = 0, pref_wgt = 0; 849 unsigned int func_code; 850 unsigned short *domain, tdom; 851 int cpen, qpen, qid = 0, rc = -ENODEV; 852 struct module *mod; 853 854 trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); 855 856 xcrb->status = 0; 857 ap_init_message(&ap_msg); 858 859 rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 860 if (rc) 861 goto out; 862 print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1, 863 ap_msg.msg, ap_msg.len, false); 864 865 tdom = *domain; 866 if (perms != &ap_perms && tdom < AP_DOMAINS) { 867 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { 868 if (!test_bit_inv(tdom, perms->adm)) { 869 rc = -ENODEV; 870 goto out; 871 } 872 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { 873 rc = -EOPNOTSUPP; 874 goto out; 875 } 876 } 877 /* 878 * If a valid target domain is set and this domain is NOT a usage 
879 * domain but a control only domain, autoselect target domain. 880 */ 881 if (tdom < AP_DOMAINS && 882 !ap_test_config_usage_domain(tdom) && 883 ap_test_config_ctrl_domain(tdom)) 884 tdom = AUTOSEL_DOM; 885 886 pref_zc = NULL; 887 pref_zq = NULL; 888 spin_lock(&zcrypt_list_lock); 889 for_each_zcrypt_card(zc) { 890 /* Check for usable CCA card */ 891 if (!zc->online || !zc->card->config || zc->card->chkstop || 892 !zc->card->hwinfo.cca) 893 continue; 894 /* Check for user selected CCA card */ 895 if (xcrb->user_defined != AUTOSELECT && 896 xcrb->user_defined != zc->card->id) 897 continue; 898 /* check if request size exceeds card max msg size */ 899 if (ap_msg.len > zc->card->maxmsgsize) 900 continue; 901 /* check if device node has admission for this card */ 902 if (!zcrypt_check_card(perms, zc->card->id)) 903 continue; 904 /* get weight index of the card device */ 905 wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 906 /* penalty if this msg was previously sent via this card */ 907 cpen = (tr && tr->again_counter && tr->last_qid && 908 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 909 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 910 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 911 continue; 912 for_each_zcrypt_queue(zq, zc) { 913 /* check for device usable and eligible */ 914 if (!zq->online || !zq->ops->send_cprb || 915 !ap_queue_usable(zq->queue) || 916 (tdom != AUTOSEL_DOM && 917 tdom != AP_QID_QUEUE(zq->queue->qid))) 918 continue; 919 /* check if device node has admission for this queue */ 920 if (!zcrypt_check_queue(perms, 921 AP_QID_QUEUE(zq->queue->qid))) 922 continue; 923 /* penalty if the msg was previously sent at this qid */ 924 qpen = (tr && tr->again_counter && tr->last_qid && 925 tr->last_qid == zq->queue->qid) ? 
926 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 927 if (!zcrypt_queue_compare(zq, pref_zq, 928 wgt + cpen + qpen, pref_wgt)) 929 continue; 930 pref_zc = zc; 931 pref_zq = zq; 932 pref_wgt = wgt + cpen + qpen; 933 } 934 } 935 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 936 spin_unlock(&zcrypt_list_lock); 937 938 if (!pref_zq) { 939 pr_debug("%s no match for address %02x.%04x => ENODEV\n", 940 __func__, xcrb->user_defined, *domain); 941 rc = -ENODEV; 942 goto out; 943 } 944 945 /* in case of auto select, provide the correct domain */ 946 qid = pref_zq->queue->qid; 947 if (*domain == AUTOSEL_DOM) 948 *domain = AP_QID_QUEUE(qid); 949 950 rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg); 951 if (!rc) { 952 print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1, 953 ap_msg.msg, ap_msg.len, false); 954 } 955 956 spin_lock(&zcrypt_list_lock); 957 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 958 spin_unlock(&zcrypt_list_lock); 959 960 out: 961 ap_release_message(&ap_msg); 962 if (tr) { 963 tr->last_rc = rc; 964 tr->last_qid = qid; 965 } 966 trace_s390_zcrypt_rep(xcrb, func_code, rc, 967 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 968 return rc; 969 } 970 971 long zcrypt_send_cprb(struct ica_xcRB *xcrb) 972 { 973 struct zcrypt_track tr; 974 int rc; 975 976 memset(&tr, 0, sizeof(tr)); 977 978 do { 979 rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); 980 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 981 982 /* on ENODEV failure: retry once again after a requested rescan */ 983 if (rc == -ENODEV && zcrypt_process_rescan()) 984 do { 985 rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); 986 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 987 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 988 rc = -EIO; 989 if (rc) 990 pr_debug("%s rc=%d\n", __func__, rc); 991 992 return rc; 993 } 994 EXPORT_SYMBOL(zcrypt_send_cprb); 995 996 static bool is_desired_ep11_card(unsigned int dev_id, 997 unsigned short 
target_num, 998 struct ep11_target_dev *targets) 999 { 1000 while (target_num-- > 0) { 1001 if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP) 1002 return true; 1003 targets++; 1004 } 1005 return false; 1006 } 1007 1008 static bool is_desired_ep11_queue(unsigned int dev_qid, 1009 unsigned short target_num, 1010 struct ep11_target_dev *targets) 1011 { 1012 int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid); 1013 1014 while (target_num-- > 0) { 1015 if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) && 1016 (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM)) 1017 return true; 1018 targets++; 1019 } 1020 return false; 1021 } 1022 1023 static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, 1024 struct zcrypt_track *tr, 1025 struct ep11_urb *xcrb) 1026 { 1027 struct zcrypt_card *zc, *pref_zc; 1028 struct zcrypt_queue *zq, *pref_zq; 1029 struct ep11_target_dev *targets; 1030 unsigned short target_num; 1031 unsigned int wgt = 0, pref_wgt = 0; 1032 unsigned int func_code, domain; 1033 struct ap_message ap_msg; 1034 int cpen, qpen, qid = 0, rc = -ENODEV; 1035 struct module *mod; 1036 1037 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 1038 1039 ap_init_message(&ap_msg); 1040 1041 target_num = (unsigned short)xcrb->targets_num; 1042 1043 /* empty list indicates autoselect (all available targets) */ 1044 targets = NULL; 1045 if (target_num != 0) { 1046 struct ep11_target_dev __user *uptr; 1047 1048 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 1049 if (!targets) { 1050 func_code = 0; 1051 rc = -ENOMEM; 1052 goto out; 1053 } 1054 1055 uptr = (struct ep11_target_dev __force __user *)xcrb->targets; 1056 if (z_copy_from_user(userspace, targets, uptr, 1057 target_num * sizeof(*targets))) { 1058 func_code = 0; 1059 rc = -EFAULT; 1060 goto out_free; 1061 } 1062 } 1063 1064 rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 1065 if (rc) 1066 goto out_free; 1067 print_hex_dump_debug("ep11req: 
", DUMP_PREFIX_ADDRESS, 16, 1, 1068 ap_msg.msg, ap_msg.len, false); 1069 1070 if (perms != &ap_perms && domain < AUTOSEL_DOM) { 1071 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { 1072 if (!test_bit_inv(domain, perms->adm)) { 1073 rc = -ENODEV; 1074 goto out_free; 1075 } 1076 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { 1077 rc = -EOPNOTSUPP; 1078 goto out_free; 1079 } 1080 } 1081 1082 pref_zc = NULL; 1083 pref_zq = NULL; 1084 spin_lock(&zcrypt_list_lock); 1085 for_each_zcrypt_card(zc) { 1086 /* Check for usable EP11 card */ 1087 if (!zc->online || !zc->card->config || zc->card->chkstop || 1088 !zc->card->hwinfo.ep11) 1089 continue; 1090 /* Check for user selected EP11 card */ 1091 if (targets && 1092 !is_desired_ep11_card(zc->card->id, target_num, targets)) 1093 continue; 1094 /* check if request size exceeds card max msg size */ 1095 if (ap_msg.len > zc->card->maxmsgsize) 1096 continue; 1097 /* check if device node has admission for this card */ 1098 if (!zcrypt_check_card(perms, zc->card->id)) 1099 continue; 1100 /* get weight index of the card device */ 1101 wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 1102 /* penalty if this msg was previously sent via this card */ 1103 cpen = (tr && tr->again_counter && tr->last_qid && 1104 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 
				TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->send_ep11_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			/* remember this queue as the best candidate so far */
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	/* pin the chosen queue (takes module + queue refs) before unlock */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		if (targets && target_num == 1) {
			pr_debug("%s no match for address %02x.%04x => ENODEV\n",
				 __func__, (int)targets->ap_id,
				 (int)targets->dom_id);
		} else if (targets) {
			pr_debug("%s no match for %d target addrs => ENODEV\n",
				 __func__, (int)target_num);
		} else {
			pr_debug("%s no match for address ff.ffff => ENODEV\n",
				 __func__);
		}
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	/* release the refs taken by zcrypt_pick_queue() */
	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	/* record outcome for the retry/penalty tracking of the caller */
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

/*
 * Kernel-internal entry point to send an EP11 CPRB (xcrb points to
 * kernel memory). Retries on -EAGAIN up to TRACK_AGAIN_MAX times and,
 * on -ENODEV, once more after a successful synchronous AP bus rescan.
 * Exhausting the retry budget is reported as -EIO.
 */
long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("%s rc=%d\n", __func__, rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);

/*
 * Fetch random bytes from a CCA card into buffer. Picks the usable
 * CCA queue with the best weight, issues the RNG request through the
 * queue's ops->rng method and returns its result (number of bytes on
 * success, negative errno otherwise).
 */
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rng ||
			    !ap_queue_usable(zq->queue))
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	/* pin the chosen queue before dropping the list lock */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("%s no matching queue found => ENODEV\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

/*
 * Fill the (old, 64-adapter) device status array: one entry per
 * known queue, indexed by card * AP_DOMAINS + queue. Cards beyond
 * MAX_ZDEV_CARDIDS are skipped since they do not fit the array.
 */
static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ?
0x01 : 0x00; 1287 } 1288 } 1289 spin_unlock(&zcrypt_list_lock); 1290 } 1291 1292 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) 1293 { 1294 struct zcrypt_card *zc; 1295 struct zcrypt_queue *zq; 1296 struct zcrypt_device_status_ext *stat; 1297 int card, queue; 1298 1299 memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT 1300 * sizeof(struct zcrypt_device_status_ext)); 1301 1302 spin_lock(&zcrypt_list_lock); 1303 for_each_zcrypt_card(zc) { 1304 for_each_zcrypt_queue(zq, zc) { 1305 card = AP_QID_CARD(zq->queue->qid); 1306 queue = AP_QID_QUEUE(zq->queue->qid); 1307 stat = &devstatus[card * AP_DOMAINS + queue]; 1308 stat->hwtype = zc->card->ap_dev.device_type; 1309 stat->functions = zc->card->hwinfo.fac >> 26; 1310 stat->qid = zq->queue->qid; 1311 stat->online = zq->online ? 0x01 : 0x00; 1312 } 1313 } 1314 spin_unlock(&zcrypt_list_lock); 1315 } 1316 EXPORT_SYMBOL(zcrypt_device_status_mask_ext); 1317 1318 int zcrypt_device_status_ext(int card, int queue, 1319 struct zcrypt_device_status_ext *devstat) 1320 { 1321 struct zcrypt_card *zc; 1322 struct zcrypt_queue *zq; 1323 1324 memset(devstat, 0, sizeof(*devstat)); 1325 1326 spin_lock(&zcrypt_list_lock); 1327 for_each_zcrypt_card(zc) { 1328 for_each_zcrypt_queue(zq, zc) { 1329 if (card == AP_QID_CARD(zq->queue->qid) && 1330 queue == AP_QID_QUEUE(zq->queue->qid)) { 1331 devstat->hwtype = zc->card->ap_dev.device_type; 1332 devstat->functions = zc->card->hwinfo.fac >> 26; 1333 devstat->qid = zq->queue->qid; 1334 devstat->online = zq->online ? 
0x01 : 0x00; 1335 spin_unlock(&zcrypt_list_lock); 1336 return 0; 1337 } 1338 } 1339 } 1340 spin_unlock(&zcrypt_list_lock); 1341 1342 return -ENODEV; 1343 } 1344 EXPORT_SYMBOL(zcrypt_device_status_ext); 1345 1346 static void zcrypt_status_mask(char status[], size_t max_adapters) 1347 { 1348 struct zcrypt_card *zc; 1349 struct zcrypt_queue *zq; 1350 int card; 1351 1352 memset(status, 0, max_adapters); 1353 spin_lock(&zcrypt_list_lock); 1354 for_each_zcrypt_card(zc) { 1355 for_each_zcrypt_queue(zq, zc) { 1356 card = AP_QID_CARD(zq->queue->qid); 1357 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1358 card >= max_adapters) 1359 continue; 1360 status[card] = zc->online ? zc->user_space_type : 0x0d; 1361 } 1362 } 1363 spin_unlock(&zcrypt_list_lock); 1364 } 1365 1366 static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters) 1367 { 1368 struct zcrypt_card *zc; 1369 struct zcrypt_queue *zq; 1370 int card; 1371 1372 memset(qdepth, 0, max_adapters); 1373 spin_lock(&zcrypt_list_lock); 1374 local_bh_disable(); 1375 for_each_zcrypt_card(zc) { 1376 for_each_zcrypt_queue(zq, zc) { 1377 card = AP_QID_CARD(zq->queue->qid); 1378 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1379 card >= max_adapters) 1380 continue; 1381 spin_lock(&zq->queue->lock); 1382 qdepth[card] = 1383 zq->queue->pendingq_count + 1384 zq->queue->requestq_count; 1385 spin_unlock(&zq->queue->lock); 1386 } 1387 } 1388 local_bh_enable(); 1389 spin_unlock(&zcrypt_list_lock); 1390 } 1391 1392 static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters) 1393 { 1394 struct zcrypt_card *zc; 1395 struct zcrypt_queue *zq; 1396 int card; 1397 u64 cnt; 1398 1399 memset(reqcnt, 0, sizeof(int) * max_adapters); 1400 spin_lock(&zcrypt_list_lock); 1401 local_bh_disable(); 1402 for_each_zcrypt_card(zc) { 1403 for_each_zcrypt_queue(zq, zc) { 1404 card = AP_QID_CARD(zq->queue->qid); 1405 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1406 card >= max_adapters) 1407 continue; 1408 
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			/* saturate 64 bit counter into the 32 bit slot */
			reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

/*
 * Sum of pendingq counts over all queues in the default domain.
 */
static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

/*
 * Sum of requestq counts over all queues in the default domain.
 */
static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}

/*
 * ICARSAMODEXPO ioctl handler: copy the modexpo request from user
 * space, run it with EAGAIN retries (and one extra round after an AP
 * bus rescan on ENODEV), then copy the output length back.
 */
static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo mex;
	struct ica_rsa_modexpo __user *umex = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex, umex, sizeof(mex)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
		return rc;
	}
	return put_user(mex.outputdatalength, &umex->outputdatalength);
}

/*
 * ICARSACRT ioctl handler: same retry scheme as icarsamodexpo_ioctl
 * but for an RSA CRT request.
 */
static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo_crt crt;
	struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt, ucrt, sizeof(crt)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSACRT rc=%d\n", rc);
		return rc;
	}
	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
}

/*
 * ZSECSENDCPRB ioctl handler: send a CCA CPRB on behalf of user
 * space; the whole xcrb (incl. status) is copied back afterwards.
 */
static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ica_xcRB xcrb;
	struct zcrypt_track tr;
	struct ica_xcRB __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc
			= _zcrypt_send_cprb(true, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
			 rc, xcrb.status);
	/* always copy the xcrb back - it carries the reply status */
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

/*
 * ZSENDEP11CPRB ioctl handler: send an EP11 CPRB on behalf of user
 * space with the usual EAGAIN/rescan retry scheme.
 */
static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ep11_urb xcrb;
	struct zcrypt_track tr;
	struct ep11_urb __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

/*
 * Main ioctl dispatcher for the z90crypt device nodes. Checks the
 * per-node admission (ap_perms in filp->private_data) first, then
 * dispatches to the request handlers and status queries. The ioctl
 * numbers and copy sizes are user-space ABI - do not change.
 */
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO:
		return icarsamodexpo_ioctl(perms, arg);
	case ICARSACRT:
		return icarsacrt_ioctl(perms, arg);
	case ZSECSENDCPRB:
		return zsecsendcprb_ioctl(perms, arg);
	case ZSENDEP11CPRB:
		return zsendep11cprb_ioctl(perms, arg);
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		/* array is large - kvmalloc may fall back to vmalloc */
		device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
					       sizeof(struct zcrypt_device_status_ext),
					       GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kvfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *)arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *)arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *)arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *)arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *)arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		pr_debug("unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
/* 32 bit compat layout of struct ica_rsa_modexpo */
struct compat_ica_rsa_modexpo {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t b_key;
	compat_uptr_t n_modulus;
};

/*
 * Compat handler for ICARSAMODEXPO: widen the 32 bit request to the
 * 64 bit layout, run it with the usual retry scheme and write the
 * output length back to the 32 bit user structure.
 */
static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata =
		compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

/* 32 bit compat layout of struct ica_rsa_modexpo_crt */
struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t bp_key;
	compat_uptr_t bq_key;
	compat_uptr_t np_prime;
	compat_uptr_t nq_prime;
	compat_uptr_t u_mult_inv;
};

/*
 * Compat handler for ICARSACRT: widen the 32 bit CRT request to the
 * 64 bit layout, run it with retries, copy the output length back.
 */
static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

/*
 * 32 bit compat layout of struct ica_xcRB. The padding arrays keep
 * the field offsets identical to the 64 bit layout; __packed because
 * the layout is user-space ABI.
 */
struct compat_ica_xcrb {
	unsigned short agent_ID;
	unsigned int user_defined;
	unsigned short request_ID;
	unsigned int request_control_blk_length;
	unsigned char padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_control_blk_addr;
	unsigned int request_data_length;
	char padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_data_address;
	unsigned int reply_control_blk_length;
	char padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_control_blk_addr;
	unsigned int reply_data_length;
	char padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_data_addr;
	unsigned short priority_window;
	unsigned int status;
} __packed;

/*
 * Compat handler for ZSECSENDCPRB: widen the 32 bit xcrb, send the
 * CPRB with retries, then copy the reply lengths and status back to
 * the 32 bit user structure.
 */
static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
	struct compat_ica_xcrb xcrb32;
	struct zcrypt_track tr;
	struct ica_xcRB xcrb64;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
		return -EFAULT;
	xcrb64.agent_ID = xcrb32.agent_ID;
	xcrb64.user_defined = xcrb32.user_defined;
	xcrb64.request_ID = xcrb32.request_ID;
	xcrb64.request_control_blk_length =
		xcrb32.request_control_blk_length;
	xcrb64.request_control_blk_addr =
		compat_ptr(xcrb32.request_control_blk_addr);
	xcrb64.request_data_length =
		xcrb32.request_data_length;
	xcrb64.request_data_address =
		compat_ptr(xcrb32.request_data_address);
	xcrb64.reply_control_blk_length =
		xcrb32.reply_control_blk_length;
	xcrb64.reply_control_blk_addr =
		compat_ptr(xcrb32.reply_control_blk_addr);
	xcrb64.reply_data_length = xcrb32.reply_data_length;
	xcrb64.reply_data_addr =
		compat_ptr(xcrb32.reply_data_addr);
	xcrb64.priority_window = xcrb32.priority_window;
	xcrb64.status = xcrb32.status;
	do {
		rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	/* reply metadata goes back even on failure */
	xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
	xcrb32.reply_data_length = xcrb64.reply_data_length;
	xcrb32.status = xcrb64.status;
	if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
		return -EFAULT;
	return rc;
}

/*
 * Compat ioctl entry: translate the three pointer-carrying request
 * ioctls, everything else is layout compatible and goes through the
 * regular handler.
 */
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(perms, filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(perms, filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcrb32(perms, filp, cmd, arg);
	return
		zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner = THIS_MODULE,
	.read = zcrypt_read,
	.write = zcrypt_write,
	.unlocked_ioctl = zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = zcrypt_compat_ioctl,
#endif
	.open = zcrypt_open,
	.release = zcrypt_release,
	.llseek = no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "z90crypt",
	.fops = &zcrypt_fops,
};

/* hwrng backend state, all protected by zcrypt_rng_mutex */
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;	/* one page of buffered random data */
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

/*
 * hwrng data_read callback: hand out one u32 from the buffer,
 * refilling it via zcrypt_rng() when empty. Returns the number of
 * bytes provided (sizeof(*data)) or -EIO on failure.
 */
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name = "zcrypt",
	.data_read = zcrypt_rng_data_read,
	.quality = 990,
};

/*
 * Register the zcrypt hwrng backend on first use; subsequent calls
 * only bump the reference count. Returns 0 or a negative errno.
 */
int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

/*
 * Drop one reference on the hwrng backend; unregister and free the
 * buffer page when the last reference goes away.
 */
void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long)zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

/*
 * Wait until the zcrypt api is operational.
 * The AP bus scan and the binding of ap devices to device drivers is
 * an asynchronous job. This function waits until these initial jobs
 * are done and so the zcrypt api should be ready to serve crypto
 * requests - if there are resources available. The function uses an
 * internal timeout of 30s.
The very first caller will either wait for
 * ap bus bindings complete or the timeout happens. This state will be
 * remembered for further callers which will only be blocked until a
 * decision is made (timeout or bindings complete).
 * On timeout -ETIME is returned, on success the return value is 0.
 */
int zcrypt_wait_api_operational(void)
{
	static DEFINE_MUTEX(zcrypt_wait_api_lock);
	/* 0 = undecided, 1 = bindings complete, <0 = sticky error */
	static int zcrypt_wait_api_state;
	int rc;

	rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
	if (rc)
		return rc;

	switch (zcrypt_wait_api_state) {
	case 0:
		/* initial state, invoke wait for the ap bus complete */
		rc = ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
		switch (rc) {
		case 0:
			/* ap bus bindings are complete */
			zcrypt_wait_api_state = 1;
			break;
		case -EINTR:
			/* interrupted, go back to caller */
			break;
		case -ETIME:
			/* timeout */
			ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
					__func__);
			zcrypt_wait_api_state = -ETIME;
			break;
		default:
			/* other failure */
			pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n",
				 __func__, rc);
			break;
		}
		break;
	case 1:
		/* a previous caller already found ap bus bindings complete */
		rc = 0;
		break;
	default:
		/* a previous caller had timeout or other failure */
		rc = zcrypt_wait_api_state;
		break;
	}

	mutex_unlock(&zcrypt_wait_api_lock);

	return rc;
}
EXPORT_SYMBOL(zcrypt_wait_api_operational);

/*
 * Set up the zcrypt s390 debug feature (2 pages, 1 area, sprintf
 * view, level DBF_ERR).
 * NOTE(review): the debug_register() result is not checked here;
 * presumably the s390 debug calls below tolerate a NULL id on
 * registration failure - confirm against the debug feature API.
 */
int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
					 ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

/* Tear down the zcrypt debug feature. */
void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

/*
 * Set up the infrastructure for the multi device node extension:
 * the 'zcrypt' class, the chrdev minor range, the cdev and the
 * zcdn_create/zcdn_destroy class attributes. Unwinds in reverse
 * order on any failure.
 */
static int __init zcdn_init(void)
{
	int rc;

	/* create a new class 'zcrypt' */
	zcrypt_class = class_create(ZCRYPT_NAME);
	if (IS_ERR(zcrypt_class)) {
		rc = PTR_ERR(zcrypt_class);
		goto out_class_create_failed;
	}
	zcrypt_class->dev_release = zcdn_device_release;

	/* alloc device minor range */
	rc = alloc_chrdev_region(&zcrypt_devt,
				 0, ZCRYPT_MAX_MINOR_NODES,
				 ZCRYPT_NAME);
	if (rc)
		goto out_alloc_chrdev_failed;

	cdev_init(&zcrypt_cdev, &zcrypt_fops);
	zcrypt_cdev.owner = THIS_MODULE;
	rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	if (rc)
		goto out_cdev_add_failed;

	/* need some class specific sysfs attributes */
	rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
	if (rc)
		goto out_class_create_file_1_failed;
	rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
	if (rc)
		goto out_class_create_file_2_failed;

	return 0;

out_class_create_file_2_failed:
	class_remove_file(zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
	cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
	class_destroy(zcrypt_class);
out_class_create_failed:
	return rc;
}

/* Tear down the zcdn infrastructure, reverse of zcdn_init(). */
static void zcdn_exit(void)
{
	class_remove_file(zcrypt_class, &class_attr_zcdn_create);
	class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
	zcdn_destroy_all();
	cdev_del(&zcrypt_cdev);
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	class_destroy(zcrypt_class);
}

/*
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	rc = zcdn_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_misc_register_failed;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();

	return 0;

out_misc_register_failed:
	zcdn_exit();
	zcrypt_debug_exit();
out:
	return rc;
}

/*
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	zcdn_exit();
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_ccamisc_exit();
	zcrypt_ep11misc_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);