// SPDX-License-Identifier: GPL-2.0-only
/* n2-drv.c: Niagara-2 RNG driver.
 *
 * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/preempt.h>
#include <linux/hw_random.h>

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

#include <asm/hypervisor.h>

#include "n2rng.h"

#define DRV_MODULE_NAME		"n2rng"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"0.3"
#define DRV_MODULE_RELDATE	"Jan 7, 2017"

static char version[] =
        DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Niagara2 RNG driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* The Niagara2 RNG provides a 64-bit read-only random number
 * register, plus a control register.  Access to the RNG is
 * virtualized through the hypervisor so that both guests and control
 * nodes can access the device.
 *
 * The entropy source consists of raw entropy sources, each
 * constructed from a voltage controlled oscillator whose phase is
 * jittered by thermal noise sources.
 *
 * The oscillators in the three raw entropy sources run at different
 * frequencies.  Normally, all three generator outputs are gathered,
 * xored together, and fed into a CRC circuit, the output of which is
 * the 64-bit read-only register.
 *
 * Some time is necessary for enough entropy to build up such that a
 * full 64 bits of entropy are available in the register.  In normal
 * operating mode (RNG_CTL_LFSR is set), the chip implements an
 * interlock which blocks register reads until sufficient entropy is
 * available.
 *
 * A control register is provided for adjusting various aspects of RNG
 * operation, and to enable diagnostic modes.  Each of the three raw
 * entropy sources has an enable bit (RNG_CTL_ES{1,2,3}).  Also
 * provided are fields for controlling the minimum time in cycles
 * between read accesses to the register (RNG_CTL_WAIT, this controls
 * the interlock described in the previous paragraph).
 *
 * The standard setting is to have the mode bit (RNG_CTL_LFSR) set,
 * all three entropy sources enabled, and the interlock time set
 * appropriately.
 *
 * The CRC polynomial used by the chip is:
 *
 *	P(X) = x^64 + x^61 + x^57 + x^56 + x^52 + x^51 + x^50 + x^48 +
 *	       x^47 + x^46 + x^43 + x^42 + x^41 + x^39 + x^38 + x^37 +
 *	       x^35 + x^32 + x^28 + x^25 + x^22 + x^21 + x^17 + x^15 +
 *	       x^13 + x^12 + x^11 + x^7 + x^5 + x + 1
 *
 * The RNG_CTL_VCO value of each noise cell must be programmed
 * separately.  This is why 4 control register values must be provided
 * to the hypervisor.  During a write, the hypervisor writes them all,
 * one at a time, to the actual RNG_CTL register.  The first three
 * values are used to set up the desired RNG_CTL_VCO for each entropy
 * source, for example:
 *
 *	control 0: (1 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES1
 *	control 1: (2 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES2
 *	control 2: (3 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES3
 *
 * And then the fourth value sets the final chip state and enables the
 * desired entropy sources.
 */
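
/* Translate a sun4v hypervisor status code from the RNG API into a
 * Linux errno.  Codes without a closer match fall through to -EINVAL.
 */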
static int n2rng_hv_err_trans(unsigned long hv_err)
{
        switch (hv_err) {
        case HV_EOK:
                return 0;
        case HV_EWOULDBLOCK:
                return -EAGAIN;
        case HV_ENOACCESS:
                return -EPERM;
        case HV_EIO:
                return -EIO;
        case HV_EBUSY:
                return -EBUSY;
        case HV_EBADALIGN:
        case HV_ENORADDR:
                return -EFAULT;
        default:
                return -EINVAL;
        }
}

static unsigned long n2rng_generic_read_control_v2(unsigned long ra,
                                                   unsigned long unit)
{
        unsigned long hv_err, state, ticks, watchdog_delta, watchdog_status;
        int block = 0, busy = 0;

        while (1) {
                hv_err = sun4v_rng_ctl_read_v2(ra, unit, &state,
                                               &ticks,
                                               &watchdog_delta,
                                               &watchdog_status);
                if (hv_err == HV_EOK)
                        break;

                if (hv_err == HV_EBUSY) {
                        if (++busy >= N2RNG_BUSY_LIMIT)
                                break;

                        udelay(1);
                } else if (hv_err == HV_EWOULDBLOCK) {
                        if (++block >= N2RNG_BLOCK_LIMIT)
                                break;

                        __delay(ticks);
                } else
                        break;
        }

        return hv_err;
}

/* In multi-socket situations, the hypervisor might need to
 * queue up the RNG control register write if it's for a unit
 * that is on a cpu socket other than the one we are executing on.
 *
 * We poll here waiting for a successful read of that control
 * register to make sure the write has actually been performed.
 */
static unsigned long n2rng_control_settle_v2(struct n2rng *np, int unit)
{
        unsigned long ra = __pa(&np->scratch_control[0]);

        return n2rng_generic_read_control_v2(ra, unit);
}

static unsigned long n2rng_write_ctl_one(struct n2rng *np, int unit,
                                         unsigned long state,
                                         unsigned long control_ra,
                                         unsigned long watchdog_timeout,
                                         unsigned long *ticks)
{
        unsigned long hv_err;

        if (np->hvapi_major == 1) {
                hv_err = sun4v_rng_ctl_write_v1(control_ra, state,
                                                watchdog_timeout, ticks);
        } else {
                hv_err = sun4v_rng_ctl_write_v2(control_ra, state,
                                                watchdog_timeout, unit);
                if (hv_err == HV_EOK)
                        hv_err = n2rng_control_settle_v2(np, unit);
                *ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
        }

        return hv_err;
}
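
/* Read one 64-bit word of random data into the buffer at real address
 * data_ra.  HV_EWOULDBLOCK and HV_EIO are retried a bounded number of
 * times before giving up; other failures are returned immediately as
 * an errno.
 */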
static int n2rng_generic_read_data(unsigned long data_ra)
{
        unsigned long ticks, hv_err;
        int block = 0, hcheck = 0;

        while (1) {
                hv_err = sun4v_rng_data_read(data_ra, &ticks);
                if (hv_err == HV_EOK)
                        return 0;

                if (hv_err == HV_EWOULDBLOCK) {
                        if (++block >= N2RNG_BLOCK_LIMIT)
                                return -EWOULDBLOCK;
                        __delay(ticks);
                } else if (hv_err == HV_ENOACCESS) {
                        return -EPERM;
                } else if (hv_err == HV_EIO) {
                        if (++hcheck >= N2RNG_HCHECK_LIMIT)
                                return -EIO;
                        udelay(10000);
                } else
                        return -ENODEV;
        }
}

static unsigned long n2rng_read_diag_data_one(struct n2rng *np,
                                              unsigned long unit,
                                              unsigned long data_ra,
                                              unsigned long data_len,
                                              unsigned long *ticks)
{
        unsigned long hv_err;

        if (np->hvapi_major == 1) {
                hv_err = sun4v_rng_data_read_diag_v1(data_ra, data_len, ticks);
        } else {
                hv_err = sun4v_rng_data_read_diag_v2(data_ra, data_len,
                                                     unit, ticks);
                if (!*ticks)
                        *ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
        }
        return hv_err;
}

static int n2rng_generic_read_diag_data(struct n2rng *np,
                                        unsigned long unit,
                                        unsigned long data_ra,
                                        unsigned long data_len)
{
        unsigned long ticks, hv_err;
        int block = 0;

        while (1) {
                hv_err = n2rng_read_diag_data_one(np, unit,
                                                  data_ra, data_len,
                                                  &ticks);
                if (hv_err == HV_EOK)
                        return 0;

                if (hv_err == HV_EWOULDBLOCK) {
                        if (++block >= N2RNG_BLOCK_LIMIT)
                                return -EWOULDBLOCK;
                        __delay(ticks);
                } else if (hv_err == HV_ENOACCESS) {
                        return -EPERM;
                } else if (hv_err == HV_EIO) {
                        return -EIO;
                } else
                        return -ENODEV;
        }
}


static int n2rng_generic_write_control(struct n2rng *np,
                                       unsigned long control_ra,
                                       unsigned long unit,
                                       unsigned long state)
{
        unsigned long hv_err, ticks;
        int block = 0, busy = 0;

        while (1) {
                hv_err = n2rng_write_ctl_one(np, unit, state, control_ra,
                                             np->wd_timeo, &ticks);
                if (hv_err == HV_EOK)
                        return 0;

                if (hv_err == HV_EWOULDBLOCK) {
                        if (++block >= N2RNG_BLOCK_LIMIT)
                                return -EWOULDBLOCK;
                        __delay(ticks);
                } else if (hv_err == HV_EBUSY) {
                        if (++busy >= N2RNG_BUSY_LIMIT)
                                return -EBUSY;
                        udelay(1);
                } else
                        return -ENODEV;
        }
}

/* Just try to see if we can successfully access the control register
 * of the RNG on the domain on which we are currently executing.
 */
static int n2rng_try_read_ctl(struct n2rng *np)
{
        unsigned long hv_err;
        unsigned long x;

        if (np->hvapi_major == 1) {
                hv_err = sun4v_rng_get_diag_ctl();
        } else {
                /* We purposefully give invalid arguments, HV_ENOACCESS
                 * is higher priority than the errors we'd get from
                 * these other cases, and that's the error we are
                 * truly interested in.
                 */
                hv_err = sun4v_rng_ctl_read_v2(0UL, ~0UL, &x, &x, &x, &x);
                switch (hv_err) {
                case HV_EWOULDBLOCK:
                case HV_ENOACCESS:
                        break;
                default:
                        hv_err = HV_EOK;
                        break;
                }
        }

        return n2rng_hv_err_trans(hv_err);
}

static u64 n2rng_control_default(struct n2rng *np, int ctl)
{
        u64 val = 0;

        if (np->data->chip_version == 1) {
                val = ((2 << RNG_v1_CTL_ASEL_SHIFT) |
                       (N2RNG_ACCUM_CYCLES_DEFAULT << RNG_v1_CTL_WAIT_SHIFT) |
                       RNG_CTL_LFSR);

                switch (ctl) {
                case 0:
                        val |= (1 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES1;
                        break;
                case 1:
                        val |= (2 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES2;
                        break;
                case 2:
                        val |= (3 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES3;
                        break;
                case 3:
                        val |= RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3;
                        break;
                default:
                        break;
                }

        } else {
                val = ((2 << RNG_v2_CTL_ASEL_SHIFT) |
                       (N2RNG_ACCUM_CYCLES_DEFAULT << RNG_v2_CTL_WAIT_SHIFT) |
                       RNG_CTL_LFSR);

                switch (ctl) {
                case 0:
                        val |= (1 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES1;
                        break;
                case 1:
                        val |= (2 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES2;
                        break;
                case 2:
                        val |= (3 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES3;
                        break;
                case 3:
                        val |= RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3;
                        break;
                default:
                        break;
                }
        }

        return val;
}

static void n2rng_control_swstate_init(struct n2rng *np)
{
        int i;

        np->flags |= N2RNG_FLAG_CONTROL;

        np->health_check_sec = N2RNG_HEALTH_CHECK_SEC_DEFAULT;
        np->accum_cycles = N2RNG_ACCUM_CYCLES_DEFAULT;
        np->wd_timeo = N2RNG_WD_TIMEO_DEFAULT;

        for (i = 0; i < np->num_units; i++) {
                struct n2rng_unit *up = &np->units[i];

                up->control[0] = n2rng_control_default(np, 0);
                up->control[1] = n2rng_control_default(np, 1);
                up->control[2] = n2rng_control_default(np, 2);
                up->control[3] = n2rng_control_default(np, 3);
        }

        np->hv_state = HV_RNG_STATE_UNCONFIGURED;
}
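
/* Poll until the RNG control state can be read from the current
 * domain, retrying for a bounded time while the hypervisor reports
 * that the read would block.  Only the control domain is expected to
 * succeed here; plain consumers of RNG data get -EPERM instead.
 */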
static int n2rng_grab_diag_control(struct n2rng *np)
{
        int i, busy_count, err = -ENODEV;

        busy_count = 0;
        for (i = 0; i < 100; i++) {
                err = n2rng_try_read_ctl(np);
                if (err != -EAGAIN)
                        break;

                if (++busy_count > 100) {
                        dev_err(&np->op->dev,
                                "Grab diag control timeout.\n");
                        return -ENODEV;
                }

                udelay(1);
        }

        return err;
}

static int n2rng_init_control(struct n2rng *np)
{
        int err = n2rng_grab_diag_control(np);

        /* Not in the control domain, that's OK, we are only a consumer
         * of the RNG data; we don't set up and program it.
         */
        if (err == -EPERM)
                return 0;
        if (err)
                return err;

        n2rng_control_swstate_init(np);

        return 0;
}

static int n2rng_data_read(struct hwrng *rng, u32 *data)
{
        struct n2rng *np = (struct n2rng *) rng->priv;
        unsigned long ra = __pa(&np->test_data);
        int len;

        if (!(np->flags & N2RNG_FLAG_READY)) {
                len = 0;
        } else if (np->flags & N2RNG_FLAG_BUFFER_VALID) {
                np->flags &= ~N2RNG_FLAG_BUFFER_VALID;
                *data = np->buffer;
                len = 4;
        } else {
                int err = n2rng_generic_read_data(ra);
                if (!err) {
                        np->flags |= N2RNG_FLAG_BUFFER_VALID;
                        np->buffer = np->test_data >> 32;
                        *data = np->test_data & 0xffffffff;
                        len = 4;
                } else {
                        dev_err(&np->op->dev, "RNG error, retesting\n");
                        np->flags &= ~N2RNG_FLAG_READY;
                        if (!(np->flags & N2RNG_FLAG_SHUTDOWN))
                                schedule_delayed_work(&np->work, 0);
                        len = 0;
                }
        }

        return len;
}

/* On a guest node, just make sure we can read random data properly.
 * If a control node reboots or reloads its n2rng driver, this won't
 * work during that time.  So we have to keep probing until the device
 * becomes usable.
 */
static int n2rng_guest_check(struct n2rng *np)
{
        unsigned long ra = __pa(&np->test_data);

        return n2rng_generic_read_data(ra);
}

static int n2rng_entropy_diag_read(struct n2rng *np, unsigned long unit,
                                   u64 *pre_control, u64 pre_state,
                                   u64 *buffer, unsigned long buf_len,
                                   u64 *post_control, u64 post_state)
{
        unsigned long post_ctl_ra = __pa(post_control);
        unsigned long pre_ctl_ra = __pa(pre_control);
        unsigned long buffer_ra = __pa(buffer);
        int err;

        err = n2rng_generic_write_control(np, pre_ctl_ra, unit, pre_state);
        if (err)
                return err;

        err = n2rng_generic_read_diag_data(np, unit,
                                           buffer_ra, buf_len);

        (void) n2rng_generic_write_control(np, post_ctl_ra, unit,
                                           post_state);

        return err;
}
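
/* Advance the self-test LFSR/CRC state by 'count' steps: shift left
 * one bit per step and fold in the polynomial whenever the top bit
 * falls off.  Used to generate the expected diagnostic sequence in
 * n2rng_check_selftest_buffer().
 */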
static u64 advance_polynomial(u64 poly, u64 val, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                int highbit_set = ((s64)val < 0);

                val <<= 1;
                if (highbit_set)
                        val ^= poly;
        }

        return val;
}

static int n2rng_test_buffer_find(struct n2rng *np, u64 val)
{
        int i, count = 0;

        /* Purposefully skip over the first word.  */
        for (i = 1; i < SELFTEST_BUFFER_WORDS; i++) {
                if (np->test_buffer[i] == val)
                        count++;
        }
        return count;
}

static void n2rng_dump_test_buffer(struct n2rng *np)
{
        int i;

        for (i = 0; i < SELFTEST_BUFFER_WORDS; i++)
                dev_err(&np->op->dev, "Test buffer slot %d [0x%016llx]\n",
                        i, np->test_buffer[i]);
}

static int n2rng_check_selftest_buffer(struct n2rng *np, unsigned long unit)
{
        u64 val;
        int err, matches, limit;

        switch (np->data->id) {
        case N2_n2_rng:
        case N2_vf_rng:
        case N2_kt_rng:
        case N2_m4_rng:  /* yes, m4 uses the old value */
                val = RNG_v1_SELFTEST_VAL;
                break;
        default:
                val = RNG_v2_SELFTEST_VAL;
                break;
        }

        matches = 0;
        for (limit = 0; limit < SELFTEST_LOOPS_MAX; limit++) {
                matches += n2rng_test_buffer_find(np, val);
                if (matches >= SELFTEST_MATCH_GOAL)
                        break;
                val = advance_polynomial(SELFTEST_POLY, val, 1);
        }

        err = 0;
        if (limit >= SELFTEST_LOOPS_MAX) {
                err = -ENODEV;
                dev_err(&np->op->dev, "Selftest failed on unit %lu\n", unit);
                n2rng_dump_test_buffer(np);
        } else
                dev_info(&np->op->dev, "Selftest passed on unit %lu\n", unit);

        return err;
}
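
/* Self-test one RNG unit: place it in the health-check state with all
 * entropy sources disabled and the LFSR enabled so that it produces a
 * deterministic sequence, capture a buffer of diagnostic data, restore
 * the unit's normal control settings, and then verify that the
 * expected polynomial sequence appears in the captured buffer.
 */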
static int n2rng_control_selftest(struct n2rng *np, unsigned long unit)
{
        int err;
        u64 base, base3;

        switch (np->data->id) {
        case N2_n2_rng:
        case N2_vf_rng:
        case N2_kt_rng:
                base = RNG_v1_CTL_ASEL_NOOUT << RNG_v1_CTL_ASEL_SHIFT;
                base3 = base | RNG_CTL_LFSR |
                        ((RNG_v1_SELFTEST_TICKS - 2) << RNG_v1_CTL_WAIT_SHIFT);
                break;
        case N2_m4_rng:
                base = RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT;
                base3 = base | RNG_CTL_LFSR |
                        ((RNG_v1_SELFTEST_TICKS - 2) << RNG_v2_CTL_WAIT_SHIFT);
                break;
        default:
                base = RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT;
                base3 = base | RNG_CTL_LFSR |
                        (RNG_v2_SELFTEST_TICKS << RNG_v2_CTL_WAIT_SHIFT);
                break;
        }

        np->test_control[0] = base;
        np->test_control[1] = base;
        np->test_control[2] = base;
        np->test_control[3] = base3;

        err = n2rng_entropy_diag_read(np, unit, np->test_control,
                                      HV_RNG_STATE_HEALTHCHECK,
                                      np->test_buffer,
                                      sizeof(np->test_buffer),
                                      &np->units[unit].control[0],
                                      np->hv_state);
        if (err)
                return err;

        return n2rng_check_selftest_buffer(np, unit);
}

static int n2rng_control_check(struct n2rng *np)
{
        int i;

        for (i = 0; i < np->num_units; i++) {
                int err = n2rng_control_selftest(np, i);
                if (err)
                        return err;
        }
        return 0;
}

/* The sanity checks passed; install the final configuration into the
 * chip, it's ready to use.
 */
static int n2rng_control_configure_units(struct n2rng *np)
{
        int unit, err;

        err = 0;
        for (unit = 0; unit < np->num_units; unit++) {
                struct n2rng_unit *up = &np->units[unit];
                unsigned long ctl_ra = __pa(&up->control[0]);
                int esrc;
                u64 base, shift;

                if (np->data->chip_version == 1) {
                        base = ((np->accum_cycles << RNG_v1_CTL_WAIT_SHIFT) |
                                (RNG_v1_CTL_ASEL_NOOUT << RNG_v1_CTL_ASEL_SHIFT) |
                                RNG_CTL_LFSR);
                        shift = RNG_v1_CTL_VCO_SHIFT;
                } else {
                        base = ((np->accum_cycles << RNG_v2_CTL_WAIT_SHIFT) |
                                (RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT) |
                                RNG_CTL_LFSR);
                        shift = RNG_v2_CTL_VCO_SHIFT;
                }

                /* XXX This isn't the best.  We should fetch a bunch
                 * XXX of words using each entropy source combined
                 * XXX with each VCO setting, and see which combinations
                 * XXX give the best random data.
                 */
                for (esrc = 0; esrc < 3; esrc++)
                        up->control[esrc] = base |
                                (esrc << shift) |
                                (RNG_CTL_ES1 << esrc);

                up->control[3] = base |
                        (RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3);

                err = n2rng_generic_write_control(np, ctl_ra, unit,
                                                  HV_RNG_STATE_CONFIGURED);
                if (err)
                        break;
        }

        return err;
}

static void n2rng_work(struct work_struct *work)
{
        struct n2rng *np = container_of(work, struct n2rng, work.work);
        int err = 0;
        static int retries = 4;

        if (!(np->flags & N2RNG_FLAG_CONTROL)) {
                err = n2rng_guest_check(np);
        } else {
                preempt_disable();
                err = n2rng_control_check(np);
                preempt_enable();

                if (!err)
                        err = n2rng_control_configure_units(np);
        }

        if (!err) {
                np->flags |= N2RNG_FLAG_READY;
                dev_info(&np->op->dev, "RNG ready\n");
        }

        if (--retries == 0)
                dev_err(&np->op->dev, "Self-test retries failed, RNG not ready\n");
        else if (err && !(np->flags & N2RNG_FLAG_SHUTDOWN))
                schedule_delayed_work(&np->work, HZ * 2);
}

static void n2rng_driver_version(void)
{
        static int n2rng_version_printed;

        if (n2rng_version_printed++ == 0)
                pr_info("%s", version);
}

static const struct of_device_id n2rng_match[];
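
/* Probe: negotiate the RNG hypervisor API group (v2, falling back to
 * v1), determine how many RNG units sit behind this node, register
 * with the hwrng core, and schedule the delayed work that self-tests
 * and configures the device before marking it ready.
 */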
static int n2rng_probe(struct platform_device *op)
{
        int err = -ENOMEM;
        struct n2rng *np;

        n2rng_driver_version();
        np = devm_kzalloc(&op->dev, sizeof(*np), GFP_KERNEL);
        if (!np)
                goto out;
        np->op = op;
        np->data = (struct n2rng_template *)device_get_match_data(&op->dev);

        INIT_DELAYED_WORK(&np->work, n2rng_work);

        if (np->data->multi_capable)
                np->flags |= N2RNG_FLAG_MULTI;

        err = -ENODEV;
        np->hvapi_major = 2;
        if (sun4v_hvapi_register(HV_GRP_RNG,
                                 np->hvapi_major,
                                 &np->hvapi_minor)) {
                np->hvapi_major = 1;
                if (sun4v_hvapi_register(HV_GRP_RNG,
                                         np->hvapi_major,
                                         &np->hvapi_minor)) {
                        dev_err(&op->dev, "Cannot register suitable "
                                "HVAPI version.\n");
                        goto out;
                }
        }

        if (np->flags & N2RNG_FLAG_MULTI) {
                if (np->hvapi_major < 2) {
                        dev_err(&op->dev, "multi-unit-capable RNG requires "
                                "HVAPI major version 2 or later, got %lu\n",
                                np->hvapi_major);
                        goto out_hvapi_unregister;
                }
                np->num_units = of_getintprop_default(op->dev.of_node,
                                                      "rng-#units", 0);
                if (!np->num_units) {
                        dev_err(&op->dev, "VF RNG lacks rng-#units property\n");
                        goto out_hvapi_unregister;
                }
        } else {
                np->num_units = 1;
        }

        dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n",
                 np->hvapi_major, np->hvapi_minor);
        np->units = devm_kcalloc(&op->dev, np->num_units, sizeof(*np->units),
                                 GFP_KERNEL);
        err = -ENOMEM;
        if (!np->units)
                goto out_hvapi_unregister;

        err = n2rng_init_control(np);
        if (err)
                goto out_hvapi_unregister;

        dev_info(&op->dev, "Found %s RNG, units: %d\n",
                 ((np->flags & N2RNG_FLAG_MULTI) ?
                  "multi-unit-capable" : "single-unit"),
                 np->num_units);

        np->hwrng.name = DRV_MODULE_NAME;
        np->hwrng.data_read = n2rng_data_read;
        np->hwrng.priv = (unsigned long) np;

        err = devm_hwrng_register(&op->dev, &np->hwrng);
        if (err)
                goto out_hvapi_unregister;

        platform_set_drvdata(op, np);

        schedule_delayed_work(&np->work, 0);

        return 0;

out_hvapi_unregister:
        sun4v_hvapi_unregister(HV_GRP_RNG);

out:
        return err;
}

static void n2rng_remove(struct platform_device *op)
{
        struct n2rng *np = platform_get_drvdata(op);

        np->flags |= N2RNG_FLAG_SHUTDOWN;

        cancel_delayed_work_sync(&np->work);

        sun4v_hvapi_unregister(HV_GRP_RNG);
}

static struct n2rng_template n2_template = {
        .id = N2_n2_rng,
        .multi_capable = 0,
        .chip_version = 1,
};

static struct n2rng_template vf_template = {
        .id = N2_vf_rng,
        .multi_capable = 1,
        .chip_version = 1,
};

static struct n2rng_template kt_template = {
        .id = N2_kt_rng,
        .multi_capable = 1,
        .chip_version = 1,
};

static struct n2rng_template m4_template = {
        .id = N2_m4_rng,
        .multi_capable = 1,
        .chip_version = 2,
};

static struct n2rng_template m7_template = {
        .id = N2_m7_rng,
        .multi_capable = 1,
        .chip_version = 2,
};

static const struct of_device_id n2rng_match[] = {
        {
                .name = "random-number-generator",
                .compatible = "SUNW,n2-rng",
                .data = &n2_template,
        },
        {
                .name = "random-number-generator",
                .compatible = "SUNW,vf-rng",
                .data = &vf_template,
        },
        {
                .name = "random-number-generator",
                .compatible = "SUNW,kt-rng",
                .data = &kt_template,
        },
        {
                .name = "random-number-generator",
                .compatible = "ORCL,m4-rng",
                .data = &m4_template,
        },
        {
                .name = "random-number-generator",
                .compatible = "ORCL,m7-rng",
                .data = &m7_template,
        },
        {},
};
MODULE_DEVICE_TABLE(of, n2rng_match);

static struct platform_driver n2rng_driver = {
        .driver = {
                .name = "n2rng",
                .of_match_table = n2rng_match,
        },
        .probe = n2rng_probe,
        .remove_new = n2rng_remove,
};

module_platform_driver(n2rng_driver);