/*
 * This file implements the platform-dependent EEH operations on pseries.
 * The pseries platform relies heavily on RTAS, so these operations boil
 * down to RTAS calls. The functions are derived from
 * arch/powerpc/platforms/pseries/eeh.c, with the necessary cleanup applied.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_bridge;
static int ibm_configure_pe;

/*
 * Buffer for reporting slot-error-detail RTAS calls. It's here
 * in BSS, and not dynamically allocated, so that it ends up in
 * the RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on pseries.
 */
static int pseries_eeh_init(void)
{
        /* figure out EEH RTAS function call tokens */
        ibm_set_eeh_option              = rtas_token("ibm,set-eeh-option");
        ibm_set_slot_reset              = rtas_token("ibm,set-slot-reset");
        ibm_read_slot_reset_state2      = rtas_token("ibm,read-slot-reset-state2");
        ibm_read_slot_reset_state       = rtas_token("ibm,read-slot-reset-state");
        ibm_slot_error_detail           = rtas_token("ibm,slot-error-detail");
        ibm_get_config_addr_info2       = rtas_token("ibm,get-config-addr-info2");
        ibm_get_config_addr_info        = rtas_token("ibm,get-config-addr-info");
        ibm_configure_pe                = rtas_token("ibm,configure-pe");
        ibm_configure_bridge            = rtas_token("ibm,configure-bridge");

        /* necessary sanity check */
        if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
                        __func__);
                return -EINVAL;
        } else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,set-slot-reset> invalid\n",
                        __func__);
                return -EINVAL;
        } else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
                   ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,read-slot-reset-state2> and "
                           "<ibm,read-slot-reset-state> invalid\n",
                           __func__);
                return -EINVAL;
        } else if (ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
                        __func__);
                return -EINVAL;
        } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
                   ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
                           "<ibm,get-config-addr-info> invalid\n",
                           __func__);
                return -EINVAL;
        } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
                   ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,configure-pe> and "
                           "<ibm,configure-bridge> invalid\n",
                           __func__);
                return -EINVAL;
        }

        /* Initialize error log lock and size */
        spin_lock_init(&slot_errbuf_lock);
        eeh_error_buf_size = rtas_token("rtas-error-log-max");
        if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: unknown EEH error log size\n",
                        __func__);
                eeh_error_buf_size = 1024;
        } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
                pr_warning("%s: EEH error log size %d exceeds the maximal %d\n",
                        __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
                eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
        }

        /* Set EEH probe mode */
        eeh_probe_mode_set(EEH_PROBE_MODE_DEVTREE);

        return 0;
}
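/*
 * Note on the token lookups above: PAPR provides both an original and a
 * newer ("2") variant of the read-slot-reset-state and get-config-addr-info
 * calls, and either ibm,configure-pe or the older ibm,configure-bridge may
 * be present. For each such pair, pseries_eeh_init() only requires one
 * member to be implemented; the callers below prefer the newer token and
 * fall back to the older one at run time.
 */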
/**
 * pseries_eeh_of_probe - EEH probe on the given device
 * @dn: OF node
 * @flag: Unused
 *
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see whether they support EEH. This function
 * performs that check for the given device node.
 */
static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
{
        struct eeh_dev *edev;
        struct eeh_pe pe;
        const u32 *class_code, *vendor_id, *device_id;
        const u32 *regs;
        int enable = 0;
        int ret;

        /* Retrieve OF node and eeh device */
        edev = of_node_to_eeh_dev(dn);
        if (!of_device_is_available(dn))
                return NULL;

        /* Retrieve class/vendor/device IDs */
        class_code = of_get_property(dn, "class-code", NULL);
        vendor_id = of_get_property(dn, "vendor-id", NULL);
        device_id = of_get_property(dn, "device-id", NULL);

        /* Skip for bad OF node or PCI-ISA bridge */
        if (!class_code || !vendor_id || !device_id)
                return NULL;
        if (dn->type && !strcmp(dn->type, "isa"))
                return NULL;

        /* Update class code and mode of eeh device */
        edev->class_code = *class_code;
        edev->mode = 0;

        /* Retrieve the device address */
        regs = of_get_property(dn, "reg", NULL);
        if (!regs) {
                pr_warning("%s: OF node property %s::reg not found\n",
                        __func__, dn->full_name);
                return NULL;
        }

        /* Initialize the fake PE */
        memset(&pe, 0, sizeof(struct eeh_pe));
        pe.phb = edev->phb;
        pe.config_addr = regs[0];

        /* Enable EEH on the device */
        ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
        if (!ret) {
                edev->config_addr = regs[0];
                /* Retrieve PE address */
                edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
                pe.addr = edev->pe_config_addr;

                /* Some older systems (Power4) allow the ibm,set-eeh-option
                 * call to succeed even on nodes where EEH is not supported.
                 * Verify support explicitly.
                 */
                ret = eeh_ops->get_state(&pe, NULL);
                if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
                        enable = 1;

                if (enable) {
                        eeh_subsystem_enabled = 1;
                        eeh_add_to_parent_pe(edev);

                        pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
                                __func__, dn->full_name, pe.phb->global_number,
                                pe.addr, pe.config_addr);
                } else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
                           (of_node_to_eeh_dev(dn->parent))->pe) {
                        /* This device doesn't support EEH, but it may have an
                         * EEH parent, in which case we mark it as supported.
                         */
                        edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
                        edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
                        eeh_add_to_parent_pe(edev);
                }
        }

        /* Save memory bars */
        eeh_save_bars(edev);

        return NULL;
}

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
        int ret = 0;
        int config_addr;

        /*
         * When we're enabling or disabling EEH functionality on
         * the particular PE, the PE config address is possibly
         * unavailable. Therefore, we have to figure it out from
         * the FDT node.
         */
        switch (option) {
        case EEH_OPT_DISABLE:
        case EEH_OPT_ENABLE:
        case EEH_OPT_THAW_MMIO:
        case EEH_OPT_THAW_DMA:
                config_addr = pe->config_addr;
                if (pe->addr)
                        config_addr = pe->addr;
                break;

        default:
                pr_err("%s: Invalid option %d\n",
                        __func__, option);
                return -EINVAL;
        }

        ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
                        config_addr, BUID_HI(pe->phb->buid),
                        BUID_LO(pe->phb->buid), option);

        return ret;
}

/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the associated PE address. There are actually two RTAS
 * calls dedicated to this purpose; the newer one is tried first and
 * the older one is used as a fallback. The caller should make sure
 * the config address has been figured out from the device tree node
 * before calling this function.
 *
 * Note that a return value of zero means the PE config address is
 * invalid.
 */
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
        int ret = 0;
        int rets[3];

        if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
                /*
                 * First of all, we need to make sure there is one PE
                 * associated with the device. Otherwise, the PE address
                 * is meaningless.
                 */
                ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
                                pe->config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid), 1);
                if (ret || (rets[0] == 0))
                        return 0;

                /* Retrieve the associated PE config address */
                ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
                                pe->config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid), 0);
                if (ret) {
                        pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
                                __func__, pe->phb->global_number, pe->config_addr);
                        return 0;
                }

                return rets[0];
        }

        if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
                                pe->config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid), 0);
                if (ret) {
                        pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
                                __func__, pe->phb->global_number, pe->config_addr);
                        return 0;
                }

                return rets[0];
        }

        return ret;
}
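/*
 * The ibm,read-slot-reset-state{,2} calls used below return the PE state
 * in rets[0] (0: reset deasserted with MMIO/DMA enabled, 1: reset asserted,
 * 2: frozen with MMIO/DMA disabled, 4: frozen with MMIO enabled, 5: state
 * temporarily unavailable), an indication in rets[1] of whether EEH is
 * supported on the slot, and, when the state is unavailable, a recommended
 * wait time in milliseconds in rets[2]. pseries_eeh_get_state() below
 * translates these into the generic EEH_STATE_* flags.
 */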
/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @state: return value
 *
 * Retrieve the state of the specified PE. On an RTAS-compliant
 * pseries platform there is a dedicated RTAS function for the purpose.
 * Note that the associated PE address might already be available when
 * this is called, so endeavour to use the PE address if possible.
 * Furthermore, there are two RTAS calls for the purpose: the newer one
 * is tried first, falling back to the older one if it is not implemented.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
{
        int config_addr;
        int ret;
        int rets[4];
        int result;

        /* Figure out PE config address if possible */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;

        if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
                /* Fake PE unavailable info */
                rets[2] = 0;
                ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else {
                return EEH_STATE_NOT_SUPPORT;
        }

        if (ret)
                return ret;

        /* Parse the result out */
        result = 0;
        if (rets[1]) {
                switch (rets[0]) {
                case 0:
                        result &= ~EEH_STATE_RESET_ACTIVE;
                        result |= EEH_STATE_MMIO_ACTIVE;
                        result |= EEH_STATE_DMA_ACTIVE;
                        break;
                case 1:
                        result |= EEH_STATE_RESET_ACTIVE;
                        result |= EEH_STATE_MMIO_ACTIVE;
                        result |= EEH_STATE_DMA_ACTIVE;
                        break;
                case 2:
                        result &= ~EEH_STATE_RESET_ACTIVE;
                        result &= ~EEH_STATE_MMIO_ACTIVE;
                        result &= ~EEH_STATE_DMA_ACTIVE;
                        break;
                case 4:
                        result &= ~EEH_STATE_RESET_ACTIVE;
                        result &= ~EEH_STATE_MMIO_ACTIVE;
                        result &= ~EEH_STATE_DMA_ACTIVE;
                        result |= EEH_STATE_MMIO_ENABLED;
                        break;
                case 5:
                        if (rets[2]) {
                                if (state)
                                        *state = rets[2];
                                result = EEH_STATE_UNAVAILABLE;
                        } else {
                                result = EEH_STATE_NOT_SUPPORT;
                        }
                        break;
                default:
                        result = EEH_STATE_NOT_SUPPORT;
                }
        } else {
                result = EEH_STATE_NOT_SUPPORT;
        }

        return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
        int config_addr;
        int ret;

        /* Figure out PE address */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;

        /* Reset PE through RTAS call */
        ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
                        config_addr, BUID_HI(pe->phb->buid),
                        BUID_LO(pe->phb->buid), option);

        /* If fundamental-reset not supported, try hot-reset */
        if (option == EEH_RESET_FUNDAMENTAL &&
            ret == -8) {
                ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid), EEH_RESET_HOT);
        }

        return ret;
}

/**
 * pseries_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * before the PE's state can be retrieved.
 */
static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
        int ret;
        int mwait;

        /*
         * According to PAPR, the state of a PE might be temporarily
         * unavailable. Under that circumstance, we have to wait for the
         * time indicated by the firmware. The maximal wait time of
         * 5 minutes and the minimal wait time of 1 second are taken
         * from the original EEH implementation.
         */
#define EEH_STATE_MIN_WAIT_TIME (1000)
#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)

        while (1) {
                ret = pseries_eeh_get_state(pe, &mwait);

                /*
                 * If the PE's state is temporarily unavailable,
                 * we have to wait for the specified time. Otherwise,
                 * the PE's state will be returned immediately.
                 */
                if (ret != EEH_STATE_UNAVAILABLE)
                        return ret;

                if (max_wait <= 0) {
                        pr_warning("%s: Timeout when getting PE's state (%d)\n",
                                __func__, max_wait);
                        return EEH_STATE_NOT_SUPPORT;
                }

                if (mwait <= 0) {
                        pr_warning("%s: Firmware returned bad wait value %d\n",
                                __func__, mwait);
                        mwait = EEH_STATE_MIN_WAIT_TIME;
                } else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
                        pr_warning("%s: Firmware returned too long wait value %d\n",
                                __func__, mwait);
                        mwait = EEH_STATE_MAX_WAIT_TIME;
                }

                max_wait -= mwait;
                msleep(mwait);
        }

        return EEH_STATE_NOT_SUPPORT;
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error from the PE.
 * Actually, the error will be retrieved through the dedicated
 * RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
        int config_addr;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&slot_errbuf_lock, flags);
        memset(slot_errbuf, 0, eeh_error_buf_size);

        /* Figure out the PE address */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;

        ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
                        BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
                        virt_to_phys(drv_log), len,
                        virt_to_phys(slot_errbuf), eeh_error_buf_size,
                        severity);
        if (!ret)
                log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
        spin_unlock_irqrestore(&slot_errbuf_lock, flags);

        return ret;
}
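/*
 * PAPR provides two calls for reconfiguring the bridges in a PE after a
 * reset: the newer ibm,configure-pe and the older ibm,configure-bridge.
 * Both take the same arguments, so pseries_eeh_configure_bridge() below
 * simply uses whichever token was found at init time, preferring the
 * newer one.
 */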
/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function is called to reconfigure the bridges included in the
 * specified PE so that the malfunctioning PE can be recovered.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
        int config_addr;
        int ret;

        /* Figure out the PE address */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;

        /* Use new configure-pe function, if supported */
        if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else {
                return -EFAULT;
        }

        if (ret)
                pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
                        __func__, pe->phb->global_number, pe->addr, ret);

        return ret;
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
{
        struct pci_dn *pdn;

        pdn = PCI_DN(dn);

        return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device
 */
static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
{
        struct pci_dn *pdn;

        pdn = PCI_DN(dn);

        return rtas_write_config(pdn, where, size, val);
}

static struct eeh_ops pseries_eeh_ops = {
        .name                   = "pseries",
        .init                   = pseries_eeh_init,
        .of_probe               = pseries_eeh_of_probe,
        .dev_probe              = NULL,
        .set_option             = pseries_eeh_set_option,
        .get_pe_addr            = pseries_eeh_get_pe_addr,
        .get_state              = pseries_eeh_get_state,
        .reset                  = pseries_eeh_reset,
        .wait_state             = pseries_eeh_wait_state,
        .get_log                = pseries_eeh_get_log,
        .configure_bridge       = pseries_eeh_configure_bridge,
        .read_config            = pseries_eeh_read_config,
        .write_config           = pseries_eeh_write_config
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on the pseries platform. This function should be
 * called before any other EEH related functions.
 */
static int __init eeh_pseries_init(void)
{
        int ret = -EINVAL;

        if (!machine_is(pseries))
                return ret;

        ret = eeh_ops_register(&pseries_eeh_ops);
        if (!ret)
                pr_info("EEH: pSeries platform initialized\n");
        else
                pr_info("EEH: pSeries platform initialization failure (%d)\n",
                        ret);

        return ret;
}

early_initcall(eeh_pseries_init);