// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Free Electrons
 * Copyright (C) 2017 NextThing Co
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 */

#include <linux/sizes.h>
#include <linux/slab.h>

#include "internals.h"

#define NAND_HYNIX_CMD_SET_PARAMS	0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS	0x16

#define NAND_HYNIX_1XNM_RR_REPEAT	8

/**
 * struct hynix_read_retry - read-retry data
 * @nregs: number of registers to set when applying a new read-retry mode
 * @regs: register offsets (NAND chip dependent)
 * @values: array of values to set in registers. The array size is equal to
 *	    (nregs * nmodes)
 */
struct hynix_read_retry {
	int nregs;
	const u8 *regs;
	u8 values[];
};

/**
 * struct hynix_nand - private Hynix NAND struct
 * @read_retry: read-retry information
 */
struct hynix_nand {
	const struct hynix_read_retry *read_retry;
};

/**
 * struct hynix_read_retry_otp - structure describing the layout of the
 *				 read-retry OTP area
 * @nregs: number of Hynix private registers to set before reading the OTP
 *	   area
 * @regs: registers that should be configured
 * @values: values that should be set in regs
 * @page: the address to pass to the READ_PAGE command. Depends on the NAND
 *	  chip
 * @size: size of the read-retry OTP section
 */
struct hynix_read_retry_otp {
	int nregs;
	const u8 *regs;
	const u8 *values;
	int page;
	int size;
};

static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
	u8 jedecid[5] = { };
	int ret;

	ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
	if (ret)
		return false;

	return !strncmp("JEDEC", jedecid, sizeof(jedecid));
}

static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(cmd, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, cmd, -1, -1);

	return 0;
}

static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
	/* The legacy path passes the register address through the column cycles. */
	u16 column = ((u16)addr << 8) | addr;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_ADDR(1, &addr, 0),
			NAND_OP_8BIT_DATA_OUT(1, &val, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_NONE, column, -1);
	chip->legacy.write_byte(chip, val);

	return 0;
}
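/*
 * The two helpers above implement Hynix's private parameter-update protocol:
 * enter 'Set Hynix Parameters' mode, write each register, then apply. A
 * minimal sketch of a single-register update (illustrative only, the
 * 0xcc/0x1e register address and value are made up):
 *
 *	hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);	// CMD 0x36
 *	hynix_nand_reg_write_op(chip, 0xcc, 0x1e);		// ADDR + DATA
 *	hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);	// CMD 0x16
 */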
static int hynix_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	const u8 *values;
	int i, ret;

	values = hynix->read_retry->values +
		 (retry_mode * hynix->read_retry->nregs);

	/* Enter 'Set Hynix Parameters' mode */
	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	/*
	 * Configure the NAND in the requested read-retry mode.
	 * This is done by setting pre-defined values in internal NAND
	 * registers.
	 *
	 * The set of registers is NAND specific, and the values are either
	 * predefined or extracted from an OTP area on the NAND (values are
	 * probably tweaked at production in this case).
	 */
	for (i = 0; i < hynix->read_retry->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
					      values[i]);
		if (ret)
			return ret;
	}

	/* Apply the new settings. */
	return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}

/**
 * hynix_get_majority - get the value occurring most often in a given set of
 *			values
 * @in: the array of values to test
 * @repeat: the size of the in array
 * @out: pointer used to store the output value
 *
 * This function implements the 'majority check' logic that is supposed to
 * overcome the unreliability of MLC NANDs when reading the OTP area storing
 * the read-retry parameters.
 *
 * It's based on a pretty simple assumption: if we repeat the same value
 * several times and then take the one that occurs most often, we should
 * find the correct value. For example, for in = {5, 5, 3, 5, 5, 0, 5, 5}
 * and repeat = 8, the function stores 5 in @out.
 * Let's hope this dummy algorithm prevents us from losing the read-retry
 * parameters.
 */
static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
{
	int i, j, half = repeat / 2;

	/*
	 * We only test the first half of the in array because we must ensure
	 * that the value occurs at least repeat / 2 times.
	 *
	 * This loop is suboptimal since we may count the occurrences of the
	 * same value several times, but we are doing that on small sets,
	 * which makes it acceptable.
	 */
	for (i = 0; i < half; i++) {
		int cnt = 0;
		u8 val = in[i];

		/* Count all values that are matching the one at index i. */
		for (j = i + 1; j < repeat; j++) {
			if (in[j] == val)
				cnt++;
		}

		/*
		 * We found a value occurring more than repeat / 2 times (cnt
		 * does not count the occurrence at index i itself).
		 */
		if (cnt > half) {
			*out = val;
			return 0;
		}
	}

	return -EIO;
}

static int hynix_read_rr_otp(struct nand_chip *chip,
			     const struct hynix_read_retry_otp *info,
			     void *buf)
{
	int i, ret;

	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	for (i = 0; i < info->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, info->regs[i],
					      info->values[i]);
		if (ret)
			return ret;
	}

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	/* Sequence to enter OTP mode? */
	ret = hynix_nand_cmd_op(chip, 0x17);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x4);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x19);
	if (ret)
		return ret;

	/* Now read the page */
	ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
	if (ret)
		return ret;

	/* Put everything back to normal */
	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	ret = hynix_nand_reg_write_op(chip, 0x38, 0);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	return nand_read_page_op(chip, 0, 0, NULL, 0);
}

#define NAND_HYNIX_1XNM_RR_COUNT_OFFS			0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS		8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv) \
	(16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))
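/*
 * Layout of the 1xnm read-retry OTP section, as implied by the offsets above
 * (a sketch derived from this code, not taken from a datasheet):
 *
 *	bytes 0-7:   number of read-retry modes, repeated 8 times
 *	bytes 8-15:  number of registers per mode, repeated 8 times
 *	bytes 16+:   8 pairs of value sets, each set (nmodes * nregs) bytes
 *		     long; pair x holds a plain copy at
 *		     NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, false) and a
 *		     bit-inverted copy at
 *		     NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, true).
 *
 * For example, with nmodes = 8 and nregs = 4 (setsize = 32), the plain copy
 * of set 2 starts at byte 16 + (4 * 32) = 144 and its inverted copy at
 * byte 16 + (5 * 32) = 176.
 */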
static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
				   int mode, int reg, bool inv, u8 *val)
{
	u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
	int val_offs = (mode * nregs) + reg;
	int set_size = nmodes * nregs;
	int i, ret;

	for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
		int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);

		tmp[i] = buf[val_offs + set_offs];
	}

	ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
	if (ret)
		return ret;

	if (inv)
		*val = ~*val;

	return 0;
}

static u8 hynix_1xnm_mlc_read_retry_regs[] = {
	0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
};

static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
				  const struct hynix_read_retry_otp *info)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	struct hynix_read_retry *rr = NULL;
	int ret, i, j;
	u8 nregs, nmodes;
	u8 *buf;

	buf = kmalloc(info->size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hynix_read_rr_otp(chip, info, buf);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_COUNT_OFFS,
				 NAND_HYNIX_1XNM_RR_REPEAT,
				 &nmodes);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS,
				 NAND_HYNIX_1XNM_RR_REPEAT,
				 &nregs);
	if (ret)
		goto out;

	rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
	if (!rr) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nmodes; i++) {
		for (j = 0; j < nregs; j++) {
			/* Destination slot for register j of mode i. */
			u8 *val = rr->values + (i * nregs) + j;

			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      false, val);
			if (!ret)
				continue;

			/* Fall back to the bit-inverted copy of the set. */
			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      true, val);
			if (ret)
				goto out;
		}
	}

	rr->nregs = nregs;
	rr->regs = hynix_1xnm_mlc_read_retry_regs;
	hynix->read_retry = rr;
	chip->ops.setup_read_retry = hynix_nand_setup_read_retry;
	chip->read_retries = nmodes;

out:
	kfree(buf);

	if (ret)
		kfree(rr);

	return ret;
}

static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };

static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x21f,
		.size = 784,
	},
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x200,
		.size = 528,
	},
};
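/*
 * Note on the .size values above (derived from the offset macros, not from a
 * datasheet): each section holds 16 header bytes followed by 16 copies of an
 * (nmodes * nregs)-byte value set, so 784 = 16 + (16 * 48) and
 * 528 = 16 + (16 * 32).
 */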
static int hynix_nand_rr_init(struct nand_chip *chip)
{
	int i, ret = 0;
	bool valid_jedecid;

	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	/*
	 * We only support read-retry for 1xnm NANDs, and those NANDs all
	 * expose a valid JEDEC ID.
	 */
	if (valid_jedecid) {
		u8 nand_tech = chip->id.data[5] >> 4;

		/* 1xnm technology */
		if (nand_tech == 4) {
			/* Try each known OTP layout until one works. */
			for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
			     i++) {
				/*
				 * FIXME: Hynix recommends copying the
				 * read-retry OTP area into a normal page.
				 */
				ret = hynix_mlc_1xnm_rr_init(chip,
						&hynix_mlc_1xnm_rr_otps[i]);
				if (!ret)
					break;
			}
		}
	}

	if (ret)
		pr_warn("failed to initialize read-retry infrastructure\n");

	return 0;
}

static void hynix_nand_extract_oobsize(struct nand_chip *chip,
				       bool valid_jedecid)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 oobsize;

	memorg = nanddev_get_memorg(&chip->base);

	oobsize = ((chip->id.data[3] >> 2) & 0x3) |
		  ((chip->id.data[3] >> 4) & 0x4);

	if (valid_jedecid) {
		switch (oobsize) {
		case 0:
			memorg->oobsize = 2048;
			break;
		case 1:
			memorg->oobsize = 1664;
			break;
		case 2:
			memorg->oobsize = 1024;
			break;
		case 3:
			memorg->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}
	} else {
		switch (oobsize) {
		case 0:
			memorg->oobsize = 128;
			break;
		case 1:
			memorg->oobsize = 224;
			break;
		case 2:
			memorg->oobsize = 448;
			break;
		case 3:
			memorg->oobsize = 64;
			break;
		case 4:
			memorg->oobsize = 32;
			break;
		case 5:
			memorg->oobsize = 16;
			break;
		case 6:
			memorg->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}

		/*
		 * The datasheet of H27UCG8T2BTR mentions that the "Redundant
		 * Area Size" is encoded "per 8KB" (page size). This chip uses
		 * a page size of 16KiB. The datasheet mentions an OOB size of
		 * 1,280 bytes, but the OOB size encoded in the ID bytes
		 * (using the existing logic above) is 640 bytes.
		 * Update the OOB size for this chip by taking the value
		 * determined above and scaling it to the actual page size (so
		 * the actual OOB size for this chip is: 640 * 16k / 8k).
		 */
		if (chip->id.data[1] == 0xde)
			memorg->oobsize *= memorg->pagesize / SZ_8K;
	}

	mtd->oobsize = memorg->oobsize;
}
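/*
 * Worked example for the OOB size decoding above (the ID byte is chosen for
 * illustration, not taken from a specific datasheet): with ID[3] = 0x48,
 * bits [3:2] give 0x2 and bit 6 contributes 0x4, so oobsize = 0x6, i.e.
 * 640 bytes on a chip without a valid JEDEC ID. On the H27UCG8T2BTR
 * (ID[1] == 0xde, 16KiB pages) this is then scaled to
 * 640 * 16k / 8k = 1280 bytes.
 */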
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
						bool valid_jedecid)
{
	struct nand_device *base = &chip->base;
	struct nand_ecc_props requirements = {};
	u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;

	if (valid_jedecid) {
		/* Reference: H27UCG8T2E datasheet */
		requirements.step_size = 1024;

		switch (ecc_level) {
		case 0:
			requirements.step_size = 0;
			requirements.strength = 0;
			break;
		case 1:
			requirements.strength = 4;
			break;
		case 2:
			requirements.strength = 24;
			break;
		case 3:
			requirements.strength = 32;
			break;
		case 4:
			requirements.strength = 40;
			break;
		case 5:
			requirements.strength = 50;
			break;
		case 6:
			requirements.strength = 60;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid ECC requirements");
		}
	} else {
		/*
		 * The meaning of the ECC requirements field depends on the
		 * NAND technology.
		 */
		u8 nand_tech = chip->id.data[5] & 0x7;

		if (nand_tech < 3) {
			/* > 26nm, reference: H27UBG8T2A datasheet */
			if (ecc_level < 5) {
				requirements.step_size = 512;
				requirements.strength = 1 << ecc_level;
			} else if (ecc_level < 7) {
				if (ecc_level == 5)
					requirements.step_size = 2048;
				else
					requirements.step_size = 1024;
				requirements.strength = 24;
			} else {
				/*
				 * We should never reach this case, but if that
				 * happens, this probably means Hynix decided
				 * to use a different extended ID format, and
				 * we should find a way to support it.
				 */
				WARN(1, "Invalid ECC requirements");
			}
		} else {
			/* <= 26nm, reference: H27UBG8T2B datasheet */
			if (!ecc_level) {
				requirements.step_size = 0;
				requirements.strength = 0;
			} else if (ecc_level < 5) {
				requirements.step_size = 512;
				requirements.strength = 1 << (ecc_level - 1);
			} else {
				requirements.step_size = 1024;
				requirements.strength = 24 +
							(8 * (ecc_level - 5));
			}
		}
	}

	nanddev_set_ecc_requirements(base, &requirements);
}
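/*
 * Quick reference for the ECC decoding above (a summary of the switch
 * statements, not an authoritative table): on chips with a valid JEDEC ID,
 * ecc_level = 2 means 24 bits of correction per 1024-byte step; on older
 * (> 26nm) chips without one, ecc_level = 3 means 1 << 3 = 8 bits per
 * 512-byte step.
 */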
static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
						       bool valid_jedecid)
{
	u8 nand_tech;

	/* We need scrambling on all TLC NANDs */
	if (nanddev_bits_per_cell(&chip->base) > 2)
		chip->options |= NAND_NEED_SCRAMBLING;

	/* And on MLC NANDs with sub-3xnm process */
	if (valid_jedecid) {
		nand_tech = chip->id.data[5] >> 4;

		/* < 3xnm */
		if (nand_tech > 0)
			chip->options |= NAND_NEED_SCRAMBLING;
	} else {
		nand_tech = chip->id.data[5] & 0x7;

		/* < 32nm */
		if (nand_tech > 2)
			chip->options |= NAND_NEED_SCRAMBLING;
	}
}

static void hynix_nand_decode_id(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	bool valid_jedecid;
	u8 tmp;

	memorg = nanddev_get_memorg(&chip->base);

	/*
	 * Exclude all SLC NANDs from this advanced detection scheme.
	 * According to the ranges defined in several datasheets, it might
	 * appear that even SLC NANDs could fall in this extended ID scheme.
	 * If that is the case, rework the test to let SLC NANDs go through
	 * the detection process.
	 */
	if (chip->id.len < 6 || nand_is_slc(chip)) {
		nand_decode_ext_id(chip);
		return;
	}

	/* Extract pagesize */
	memorg->pagesize = 2048 << (chip->id.data[3] & 0x03);
	mtd->writesize = memorg->pagesize;

	tmp = (chip->id.data[3] >> 4) & 0x3;
	/*
	 * When bit7 is set we start counting at 1MiB, otherwise we start
	 * counting at 128KiB and shift this value by the content of
	 * ID[3][5:4].
	 * The only exception is when ID[3][5:4] == 3 and ID[3][7] == 0: in
	 * this case the erasesize is set to 768KiB.
	 */
	if (chip->id.data[3] & 0x80) {
		memorg->pages_per_eraseblock = (SZ_1M << tmp) /
					       memorg->pagesize;
		mtd->erasesize = SZ_1M << tmp;
	} else if (tmp == 3) {
		memorg->pages_per_eraseblock = (SZ_512K + SZ_256K) /
					       memorg->pagesize;
		mtd->erasesize = SZ_512K + SZ_256K;
	} else {
		memorg->pages_per_eraseblock = (SZ_128K << tmp) /
					       memorg->pagesize;
		mtd->erasesize = SZ_128K << tmp;
	}

	/*
	 * Modern Toggle DDR NANDs have a valid JEDEC ID even though they are
	 * not exposing a valid JEDEC parameter table.
	 * These NANDs use a different NAND ID scheme.
	 */
	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	hynix_nand_extract_oobsize(chip, valid_jedecid);
	hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
	hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}

static void hynix_nand_cleanup(struct nand_chip *chip)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);

	if (!hynix)
		return;

	kfree(hynix->read_retry);
	kfree(hynix);
	nand_set_manufacturer_data(chip, NULL);
}

static int
h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip,
				       struct nand_interface_config *iface)
{
	onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);

	return nand_choose_best_sdr_timings(chip, iface, NULL);
}

static int h27ucg8t2etrbc_init(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	chip->options |= NAND_NEED_SCRAMBLING;
	mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);

	return 0;
}

static int hynix_nand_init(struct nand_chip *chip)
{
	struct hynix_nand *hynix;
	int ret;

	if (!nand_is_slc(chip))
		chip->options |= NAND_BBM_LASTPAGE;
	else
		chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;

	hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
	if (!hynix)
		return -ENOMEM;

	nand_set_manufacturer_data(chip, hynix);

	if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model,
		     sizeof("H27UCG8T2ATR-BC") - 1))
		chip->ops.choose_interface_config =
			h27ucg8t2atrbc_choose_interface_config;

	if (!strncmp("H27UCG8T2ETR-BC", chip->parameters.model,
		     sizeof("H27UCG8T2ETR-BC") - 1))
		h27ucg8t2etrbc_init(chip);

	ret = hynix_nand_rr_init(chip);
	if (ret)
		hynix_nand_cleanup(chip);

	return ret;
}

static void hynix_fixup_onfi_param_page(struct nand_chip *chip,
					struct nand_onfi_params *p)
{
	/*
	 * Certain chips might report a 0 in the sdr_timing_modes field
	 * (bytes 129-130 of the parameter page). This has been seen on
	 * H27U4G8F2GDA-BI. According to the ONFI specification, bit 0 of
	 * this field "shall be 1". Forcibly set this bit.
	 */
	p->sdr_timing_modes |= cpu_to_le16(BIT(0));
}

const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
	.detect = hynix_nand_decode_id,
	.init = hynix_nand_init,
	.cleanup = hynix_nand_cleanup,
	.fixup_onfi_param_page = hynix_fixup_onfi_param_page,
};
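/*
 * Note: these hooks are not called directly by controller drivers. Assuming
 * the usual raw NAND core behaviour (nand_base.c), the core invokes .detect
 * and .init from nand_scan() once the Hynix manufacturer ID has been
 * matched, .fixup_onfi_param_page while parsing the ONFI parameter page,
 * and .cleanup on teardown.
 */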