/*
 * zcrypt 2.1.0
 *
 * Copyright IBM Corp. 2001, 2006
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/hw_random.h>

#include "zcrypt_api.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
		   "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(zcrypt_device_lock);
static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

static int zcrypt_rng_device_add(void);
static void zcrypt_rng_device_remove(void);

/*
 * Device attributes common for all crypto devices.
 */
static ssize_t zcrypt_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
}

static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);

static ssize_t zcrypt_online_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
}

static ssize_t zcrypt_online_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	int online;

	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
		return -EINVAL;
	zdev->online = online;
	if (!online)
		ap_flush_queue(zdev->ap_dev);
	return count;
}

static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);

static struct attribute * zcrypt_device_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_online.attr,
	NULL,
};

static struct attribute_group zcrypt_device_attr_group = {
	.attrs = zcrypt_device_attrs,
};

/**
 * __zcrypt_increase_preference(): Increase preference of a crypto device.
 * @zdev: Pointer to the crypto device
 *
 * Move the device towards the head of the device list.
 * Needs to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating <=
		    (zdev->request_count + 1) * zdev->speed_rating &&
		    tmp->speed_rating != 0)
			break;
	}
	if (l == zdev->list.prev)
		return;
	/* Move zdev behind l */
	list_move(&zdev->list, l);
}

/**
 * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
 * @zdev: Pointer to a crypto device.
 *
 * Move the device towards the tail of the device list.
 * Needs to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating >
		    (zdev->request_count + 1) * zdev->speed_rating ||
		    tmp->speed_rating == 0)
			break;
	}
	if (l == zdev->list.next)
		return;
	/* Move zdev before l */
	list_move_tail(&zdev->list, l);
}
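
/*
 * Illustrative example of the ordering used above: the two helpers keep
 * the list approximately sorted ascending by the product
 * (request_count + 1) * speed_rating, and the dispatchers below always
 * take the first eligible entry, so the head is the preferred device.
 * A card with speed_rating 5 and 2 requests in flight scores
 * (2 + 1) * 5 = 15, while an idle card with speed_rating 10 scores
 * (0 + 1) * 10 = 10 and is therefore preferred, even though its
 * per-request rating is worse.
 */
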
static void zcrypt_device_release(struct kref *kref)
{
	struct zcrypt_device *zdev =
		container_of(kref, struct zcrypt_device, refcount);
	zcrypt_device_free(zdev);
}

void zcrypt_device_get(struct zcrypt_device *zdev)
{
	kref_get(&zdev->refcount);
}
EXPORT_SYMBOL(zcrypt_device_get);

int zcrypt_device_put(struct zcrypt_device *zdev)
{
	return kref_put(&zdev->refcount, zcrypt_device_release);
}
EXPORT_SYMBOL(zcrypt_device_put);

struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
{
	struct zcrypt_device *zdev;

	zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
	if (!zdev)
		return NULL;
	zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
	if (!zdev->reply.message)
		goto out_free;
	zdev->reply.length = max_response_size;
	spin_lock_init(&zdev->lock);
	INIT_LIST_HEAD(&zdev->list);
	return zdev;

out_free:
	kfree(zdev);
	return NULL;
}
EXPORT_SYMBOL(zcrypt_device_alloc);

void zcrypt_device_free(struct zcrypt_device *zdev)
{
	kfree(zdev->reply.message);
	kfree(zdev);
}
EXPORT_SYMBOL(zcrypt_device_free);

/**
 * zcrypt_device_register() - Register a crypto device.
 * @zdev: Pointer to a crypto device
 *
 * Register a crypto device. Returns 0 if successful.
 */
int zcrypt_device_register(struct zcrypt_device *zdev)
{
	int rc;

	rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
				&zcrypt_device_attr_group);
	if (rc)
		goto out;
	get_device(&zdev->ap_dev->device);
	kref_init(&zdev->refcount);
	spin_lock_bh(&zcrypt_device_lock);
	zdev->online = 1;	/* New devices are online by default. */
	list_add_tail(&zdev->list, &zcrypt_device_list);
	__zcrypt_increase_preference(zdev);
	zcrypt_device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
	if (zdev->ops->rng) {
		rc = zcrypt_rng_device_add();
		if (rc)
			goto out_unregister;
	}
	return 0;

out_unregister:
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
	spin_unlock_bh(&zcrypt_device_lock);
	sysfs_remove_group(&zdev->ap_dev->device.kobj,
			   &zcrypt_device_attr_group);
	put_device(&zdev->ap_dev->device);
	zcrypt_device_put(zdev);
out:
	return rc;
}
EXPORT_SYMBOL(zcrypt_device_register);

/**
 * zcrypt_device_unregister(): Unregister a crypto device.
 * @zdev: Pointer to crypto device
 *
 * Unregister a crypto device.
 */
void zcrypt_device_unregister(struct zcrypt_device *zdev)
{
	if (zdev->ops->rng)
		zcrypt_rng_device_remove();
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
	spin_unlock_bh(&zcrypt_device_lock);
	sysfs_remove_group(&zdev->ap_dev->device.kobj,
			   &zcrypt_device_attr_group);
	put_device(&zdev->ap_dev->device);
	zcrypt_device_put(zdev);
}
EXPORT_SYMBOL(zcrypt_device_unregister);
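
/*
 * Illustrative driver-side lifecycle (a sketch; "my_ops", "my_card" and
 * MY_MAX_RESPONSE_SIZE are placeholders, not taken from a real card
 * driver):
 *
 *	zdev = zcrypt_device_alloc(MY_MAX_RESPONSE_SIZE);
 *	if (!zdev)
 *		return -ENOMEM;
 *	zdev->ap_dev = ap_dev;
 *	zdev->ops = &my_ops;
 *	zdev->type_string = "my_card";
 *	rc = zcrypt_device_register(zdev);
 *	...
 *	zcrypt_device_unregister(zdev);		(on device removal)
 */
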
/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

/*
 * zcrypt ioctls.
 */
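
/*
 * Common dispatch pattern of the four helpers below (summarized here
 * for clarity): walk the preference-ordered device list under
 * zcrypt_device_lock, pick the first online device whose ops and size
 * limits fit the request, bump its request_count (which demotes it in
 * the list), drop the lock around the actual card I/O, then undo the
 * accounting.  -EAGAIN means the owning driver module was going away;
 * the ioctl handlers further down simply retry in that case.
 */
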
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_device *zdev;
	int rc;

	if (mex->outputdatalength < mex->inputdatalength)
		return -EINVAL;
	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo ||
		    zdev->min_mod_size > mex->inputdatalength ||
		    zdev->max_mod_size < mex->inputdatalength)
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo(zdev, mex);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}

static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_device *zdev;
	unsigned long long z1, z2, z3;
	int rc, copied;

	if (crt->outputdatalength < crt->inputdatalength ||
	    (crt->inputdatalength & 1))
		return -EINVAL;
	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	copied = 0;
 restart:
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo_crt ||
		    zdev->min_mod_size > crt->inputdatalength ||
		    zdev->max_mod_size < crt->inputdatalength)
			continue;
		if (zdev->short_crt && crt->inputdatalength > 240) {
			/*
			 * Check inputdata for leading zeros for cards
			 * that can't handle np_prime, bp_key, or
			 * u_mult_inv > 128 bytes.
			 */
			if (copied == 0) {
				unsigned int len;
				spin_unlock_bh(&zcrypt_device_lock);
				/* len is max 256 / 2 - 120 = 8
				 * For bigger device just assume len of leading
				 * 0s is 8 as stated in the requirements for
				 * ica_rsa_modexpo_crt struct in zcrypt.h.
				 */
				if (crt->inputdatalength <= 256)
					len = crt->inputdatalength / 2 - 120;
				else
					len = 8;
				if (len > sizeof(z1))
					return -EFAULT;
				z1 = z2 = z3 = 0;
				if (copy_from_user(&z1, crt->np_prime, len) ||
				    copy_from_user(&z2, crt->bp_key, len) ||
				    copy_from_user(&z3, crt->u_mult_inv, len))
					return -EFAULT;
				copied = 1;
				/*
				 * We have to restart device lookup -
				 * the device list may have changed by now.
				 */
				goto restart;
			}
			if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
				/* The device can't handle this request. */
				continue;
		}
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
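
/*
 * Worked example for the short-CRT probe above (illustrative): with a
 * 2048-bit key, inputdatalength is 256, so np_prime, bp_key and
 * u_mult_inv each carry 256 / 2 + 8 = 136 bytes per the requirements
 * stated in zcrypt.h.  A card limited to 128-byte values can only be
 * used if the leading 256 / 2 - 120 = 8 bytes of each field are zero,
 * which is exactly what the copy_from_user() probe samples and the
 * z1/z2/z3 check tests.
 */
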
static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_device *zdev;
	int rc;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online || !zdev->ops->send_cprb ||
		    (xcRB->user_defined != AUTOSELECT &&
		     AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->send_cprb(zdev, xcRB);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
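
/*
 * Note on CPRB targeting (see the list walk above): xcRB->user_defined
 * either names a specific AP device number or contains AUTOSELECT, in
 * which case the first suitable device in preference order is used.
 */
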
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_device *zdev;
	int rc;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online || !zdev->ops->rng)
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rng(zdev, buffer);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}

static void zcrypt_status_mask(char status[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(status, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->online ? zdev->user_space_type : 0x0d;
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(qdepth, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->pendingq_count +
			zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->total_request_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_device *zdev;
	int pendingq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		pendingq_count += zdev->ap_dev->pendingq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_device *zdev;
	int requestq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		requestq_count += zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return requestq_count;
}

static int zcrypt_count_type(int type)
{
	struct zcrypt_device *zdev;
	int device_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (zdev->user_space_type == type)
			device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
	return device_count;
}

/**
 * zcrypt_ica_status(): Old, deprecated combi status call.
 *
 * Old, deprecated combi status call.
 */
static long zcrypt_ica_status(struct file *filp, unsigned long arg)
{
	struct ica_z90_status *pstat;
	int ret;

	pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
	if (!pstat)
		return -ENOMEM;
	pstat->totalcount = zcrypt_device_count;
	pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
	pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
	pstat->requestqWaitCount = zcrypt_requestq_count();
	pstat->pendingqWaitCount = zcrypt_pendingq_count();
	pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
	pstat->cryptoDomain = ap_domain_index;
	zcrypt_status_mask(pstat->status);
	zcrypt_qdepth_mask(pstat->qdepth);
	ret = 0;
	if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
		ret = -EFAULT;
	kfree(pstat);
	return ret;
}

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;
		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;
		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;
		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		char status[AP_DEVICES];
		zcrypt_status_mask(status);
		if (copy_to_user((char __user *) arg, status,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];
		zcrypt_qdepth_mask(qdepth);
		if (copy_to_user((char __user *) arg, qdepth,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		int reqcnt[AP_DEVICES];
		zcrypt_perdev_reqcnt(reqcnt);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls. Don't add another device count ioctl,
	 * you can count them yourself in the user space with the
	 * output of the Z90STAT_STATUS_MASK ioctl.
	 */
	case ICAZ90STATUS:
		return zcrypt_ica_status(filp, arg);
	case Z90STAT_TOTALCOUNT:
		return put_user(zcrypt_device_count, (int __user *) arg);
	case Z90STAT_PCICACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICA),
				(int __user *) arg);
	case Z90STAT_PCICCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICC),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL2COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL3COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_PCIXCCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
				zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_CEX2CCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
				(int __user *) arg);
	case Z90STAT_CEX2ACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
				(int __user *) arg);
	default:
		/* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}
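
/*
 * Illustrative user-space call sequence (a sketch, not part of this
 * module):
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_rsa_modexpo mex;		(filled in by the caller)
 *	int rc = ioctl(fd, ICARSAMODEXPO, &mex);
 *
 * On success mex.outputdatalength holds the number of result bytes
 * written to mex.outputdata.  -EAGAIN is retried internally above, so
 * user space does not see it from these ioctls.
 */
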
#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	if (!rc)
		rc = put_user(mex64.outputdatalength,
			      &umex32->outputdatalength);
	return rc;
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	if (!rc)
		rc = put_user(crt64.outputdatalength,
			      &ucrt32->outputdatalength);
	return rc;
}

struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof (compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof (compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof (compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof (compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __attribute__((packed));

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};

/*
 * Deprecated /proc entry support.
 */
static struct proc_dir_entry *zcrypt_entry;

static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		seq_printf(m, "%01x", (unsigned int) addr[i]);
	seq_putc(m, ' ');
}

static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
{
	int inl, c, cx;

	seq_printf(m, " ");
	inl = 0;
	for (c = 0; c < (len / 16); c++) {
		sprintcl(m, addr+inl, 16);
		inl += 16;
	}
	cx = len%16;
	if (cx) {
		sprintcl(m, addr+inl, cx);
		inl += cx;
	}
	seq_putc(m, '\n');
}

static void sprinthx(unsigned char *title, struct seq_file *m,
		     unsigned char *addr, unsigned int len)
{
	int inl, r, rx;

	seq_printf(m, "\n%s\n", title);
	inl = 0;
	for (r = 0; r < (len / 64); r++) {
		sprintrw(m, addr+inl, 64);
		inl += 64;
	}
	rx = len % 64;
	if (rx) {
		sprintrw(m, addr+inl, rx);
		inl += rx;
	}
	seq_putc(m, '\n');
}

static void sprinthx4(unsigned char *title, struct seq_file *m,
		      unsigned int *array, unsigned int len)
{
	int r;

	seq_printf(m, "\n%s\n", title);
	for (r = 0; r < len; r++) {
		if ((r % 8) == 0)
			seq_printf(m, " ");
		seq_printf(m, "%08X ", array[r]);
		if ((r % 8) == 7)
			seq_putc(m, '\n');
	}
	seq_putc(m, '\n');
}

static int zcrypt_proc_show(struct seq_file *m, void *v)
{
	char workarea[sizeof(int) * AP_DEVICES];

	seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
		   ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
	seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
	seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
	seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
	seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
	seq_printf(m, "PCIXCC MCL2 count: %d\n",
		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
	seq_printf(m, "PCIXCC MCL3 count: %d\n",
		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
	seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
	seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
	seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
	seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
	seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
	seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
	seq_printf(m, "Total open handles: %d\n\n",
		   atomic_read(&zcrypt_open_count));
	zcrypt_status_mask(workarea);
	sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
		 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
		 m, workarea, AP_DEVICES);
	zcrypt_qdepth_mask(workarea);
	sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
	zcrypt_perdev_reqcnt((int *) workarea);
	sprinthx4("Per-device successfully completed request counts",
		  m, (unsigned int *) workarea, AP_DEVICES);
	return 0;
}

static int zcrypt_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, zcrypt_proc_show, NULL);
}
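
/*
 * Illustrative write interface (see zcrypt_proc_write below): reading
 * /proc/driver/z90crypt yields an "Online devices" matrix; writing a
 * modified copy of that section back toggles cards, with 'd' in a
 * card's position disabling it and 'e' re-enabling it.
 */
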
static void zcrypt_disable_card(int index)
{
	struct zcrypt_device *zdev;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
			zdev->online = 0;
			ap_flush_queue(zdev->ap_dev);
			break;
		}
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_enable_card(int index)
{
	struct zcrypt_device *zdev;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
			zdev->online = 1;
			break;
		}
	spin_unlock_bh(&zcrypt_device_lock);
}

static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
				 size_t count, loff_t *pos)
{
	unsigned char *lbuf, *ptr;
	size_t local_count;
	int j;

	if (count <= 0)
		return 0;

#define LBUFSIZE 1200UL
	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
	if (!lbuf)
		return 0;

	local_count = min(LBUFSIZE - 1, count);
	if (copy_from_user(lbuf, buffer, local_count) != 0) {
		kfree(lbuf);
		return -EFAULT;
	}
	lbuf[local_count] = '\0';

	ptr = strstr(lbuf, "Online devices");
	if (!ptr)
		goto out;
	ptr = strstr(ptr, "\n");
	if (!ptr)
		goto out;
	ptr++;

	if (strstr(ptr, "Waiting work element counts") == NULL)
		goto out;

	for (j = 0; j < 64 && *ptr; ptr++) {
		/*
		 * '0' for no device, '1' for PCICA, '2' for PCICC,
		 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
		 * '5' for CEX2C, '6' for CEX2A,
		 * '7' for CEX3C and '8' for CEX3A
		 */
		if (*ptr >= '0' && *ptr <= '8')
			j++;
		else if (*ptr == 'd' || *ptr == 'D')
			zcrypt_disable_card(j++);
		else if (*ptr == 'e' || *ptr == 'E')
			zcrypt_enable_card(j++);
		else if (*ptr != ' ' && *ptr != '\t')
			break;
	}
out:
	kfree(lbuf);
	return count;
}

static const struct file_operations zcrypt_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= zcrypt_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= zcrypt_proc_write,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof *data;
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof *data;
}
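
/*
 * Illustrative numbers for the draining logic above: zcrypt_rng()
 * fills the page-sized zcrypt_rng_buffer and returns the number of
 * bytes obtained, so a single card round trip yields rc / sizeof(u32)
 * words, handed out one u32 per data_read call until the buffer is
 * empty again.
 */
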
static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
};

static int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

static void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	/* Set up the proc file system */
	zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
				   &zcrypt_proc_fops);
	if (!zcrypt_entry) {
		rc = -ENOMEM;
		goto out_misc;
	}

	return 0;

out_misc:
	misc_deregister(&zcrypt_misc_device);
out:
	return rc;
}

/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void zcrypt_api_exit(void)
{
	remove_proc_entry("driver/z90crypt", NULL);
	misc_deregister(&zcrypt_misc_device);
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);