/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Address translation for the Intel Nehalem (NHM) integrated memory
 * controller.  mem_reg_init() caches the SAD/TAD/SAG/RIR decode
 * registers into the tables below; the remaining routines perform
 * forward (system address -> node/channel/rank) and reverse
 * (rank address -> system address) translation from those tables.
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/fm/protocol.h>
#include <sys/cpu_module_impl.h>
#include "intel_nhm.h"
#include "nhm_log.h"

/* Cached Source Address Decoder (SAD) DRAM rule (per system) */
struct sad {
	uint64_t limit;		/* exclusive upper address limit of rule */
	uint32_t node_list;	/* packed socket interleave target list */
	char mode;		/* interleave mode: 0, 1 or 2 */
	char enable;		/* non-zero when the rule is valid */
	char interleave;	/* distinct targets in node_list */
} sad[MAX_SAD_DRAM_RULE];

/* Cached Target Address Decoder (TAD) DRAM rule (per node) */
struct tad {
	uint64_t limit;		/* exclusive upper address limit of rule */
	uint32_t pkg_list;	/* packed channel interleave target list */
	char mode;		/* interleave mode: 0, 1 or 2 */
	char enable;		/* non-zero when the rule is valid */
	char interleave;	/* distinct targets in pkg_list */
} tad[MAX_CPU_NODES][MAX_TAD_DRAM_RULE];

/* Cached System Address Generator (SAG) transform, per node/channel/rule */
struct sag_ch {
	int32_t offset;		/* signed channel address offset (<<16) */
	char divby3;		/* divide address (above bit 6) by 3 */
	char remove6;		/* squeeze out address bit 6 */
	char remove7;		/* squeeze out address bit 7 */
	char remove8;		/* squeeze out address bit 8 */
} sag_ch[MAX_CPU_NODES][CHANNELS_PER_MEMORY_CONTROLLER][MAX_TAD_DRAM_RULE];

/* Cached Rank Interleave Register (RIR) rule, per node/channel/rule */
struct rir {
	uint64_t limit;		/* exclusive channel-address limit of rule */
	struct rir_way {
		int16_t offset;	/* signed rank offset, in VRANK_SZ units */
		uint8_t rank;	/* target rank id */
		uint64_t rlimit; /* rank-address limit (0 = unlimited) */
	} way[MAX_RIR_WAY];
	char interleave;	/* rank interleave factor: 1, 2 or 4 */
} rir[MAX_CPU_NODES][CHANNELS_PER_MEMORY_CONTROLLER][MAX_TAD_DRAM_RULE];

char closed_page;		/* MC runs in closed-page mode */
char ecc_enabled;		/* ECC is enabled on the MC */
char lockstep[2];		/* per-node: channels run in lockstep */
char mirror_mode[2];		/* per-node: channel mirroring enabled */
char spare_channel[2];		/* per-node: channel 2 reserved as spare */

/*
 * Determine whether physical channel `channel` participates in the
 * interleave of TAD rule `rule` on `node`.  On success *way_p is set
 * to the interleave way that selects this channel and *no_interleave_p
 * is set when every way of the rule targets the same logical channel.
 * Returns 1 when the channel is part of the interleave, 0 otherwise.
 */
static int
channel_in_interleave(int node, int channel, int rule, int *way_p,
    int *no_interleave_p)
{
	int way;
	int c;
	int i;
	uint32_t mc_channel_mapper;
	int lc;
	int rt = 0;
	int start = 0;

	if (lockstep[node] || mirror_mode[node]) {
		/*
		 * Lockstep/mirror: channels 0 and 1 operate as a pair,
		 * so both count as interleaved and channel 2 does not.
		 */
		*no_interleave_p = 0;
		if (channel > 1)
			return (0);
		else
			return (1);
	}
	mc_channel_mapper = MC_CHANNEL_MAPPER_RD(node);
	lc = -1;
	c = 1 << channel;
	/* Map the physical channel to its logical channel (read side) */
	for (i = 0; i < CHANNELS_PER_MEMORY_CONTROLLER; i++) {
		if ((CHANNEL_MAP(mc_channel_mapper, i, 0) & c) != 0) {
			lc = i;
			break;
		}
	}
	if (lc == -1) {
		/* Not found for reads; try the write-side mapping */
		for (i = 0; i < CHANNELS_PER_MEMORY_CONTROLLER; i++) {
			if ((CHANNEL_MAP(mc_channel_mapper, i, 1) & c) != 0) {
				lc = i;
				break;
			}
		}
	}
	if (lc == -1) {
		return (0);
	}
	*way_p = 0;
	*no_interleave_p = 0;
	/*
	 * NOTE(review): for mode 2 rules on a non-zero node the scan
	 * starts at way 4 — presumably the upper half of the 8-way list
	 * belongs to the second socket; confirm against the MC spec.
	 */
	if (node && tad[node][rule].mode == 2)
		start = 4;
	for (way = start; way < INTERLEAVE_NWAY; way++) {
		if (lc == TAD_INTERLEAVE(tad[node][rule].pkg_list, way)) {
			*way_p = way;
			if (way == 0) {
				/*
				 * Matched at way 0: check whether all the
				 * remaining ways also target this channel,
				 * i.e. no interleaving is in effect.
				 */
				for (i = way + 1; i < INTERLEAVE_NWAY; i++) {
					c = TAD_INTERLEAVE(
					    tad[node][rule].pkg_list, i);
					if (lc != c) {
						break;
					}
				}
				if (i == INTERLEAVE_NWAY)
					*no_interleave_p = 1;
			}
			rt = 1;
			break;
		}
	}
	return (rt);
}

/*
 * Translate a system address to the owning memory-controller node
 * (socket) using the cached SAD rules.  When `interleave_p` is
 * non-NULL it receives the socket interleave factor of the matching
 * rule.  Returns -1 when no enabled rule covers the address, the rule
 * mode is unknown, or the target package id is not 1 or 2.
 */
int
address_to_node(uint64_t addr, int *interleave_p)
{
	int i;
	int node = -1;
	uint64_t base;
	int way;
	uchar_t package;

	base = 0;
	for (i = 0; i < MAX_SAD_DRAM_RULE; i++) {
		if (sad[i].enable && addr >= base && addr < sad[i].limit) {
			/* Pick the interleave way from low address bits */
			switch (sad[i].mode) {
			case 0:
				way = (addr >> 6) & 7;
				break;
			case 1:
				way = ((addr >> 6) & 7) ^ ((addr >> 16) & 7);
				break;
			case 2:
				way = ((addr >> 4) & 4) |
				    (((addr >> 6) & 0x3ffffffff) % 3);
				break;
			default:
				return (-1);
			}
			package = SAD_INTERLEAVE(sad[i].node_list, way);
			if (interleave_p)
				*interleave_p = sad[i].interleave;
			/* Package ids are 1-based; only two sockets exist */
			if (package == 1)
				node = 0;
			else if (package == 2)
				node = 1;
			else
				node = -1;
			break;
		}
		base = sad[i].limit;
	}
	return (node);
}

/*
 * Apply the System Address Generator (SAG) transform for the given
 * node/channel/TAD-rule to a node-local address: add the channel
 * offset (a signed 64K-granular value), then squeeze out address bits
 * 8, 7 and/or 6 and divide by 3 when the rule is so configured.
 * Returns the resulting channel address.
 */
static uint64_t
channel_address(int node, int channel, int rule, uint64_t addr)
{
	uint64_t caddr;

	if (lockstep[node] || mirror_mode[node])
		channel = 0;
	caddr = (((addr >> 16) +
	    (int64_t)sag_ch[node][channel][rule].offset) << 16) |
	    (addr & 0xffc0);
	if (sag_ch[node][channel][rule].remove8) {
		caddr = ((caddr >> 1) & ~0xff) | (caddr & 0xff);
	}
	if (sag_ch[node][channel][rule].remove7) {
		caddr = ((caddr >> 1) & ~0x7f) | (caddr & 0x7f);
	}
	if (sag_ch[node][channel][rule].remove6) {
		caddr = ((caddr >> 1) & ~0x3f) | (caddr & 0x3f);
	}
	/* Channel addresses are confined to 37 bits */
	caddr = caddr & 0x1fffffffff;
	if (sag_ch[node][channel][rule].divby3) {
		caddr = ((((caddr >> 6) / 3) << 6) & 0x1fffffffc0) |
		    (caddr & 0x3f);
	}
	return (caddr);
}

/*
 * Translate a node-local system address to the physical channel that
 * services it.  `write` selects the write-side channel mapping.  When
 * `channel_addrp` is non-NULL it receives the channel address; when
 * `interleave_p` is non-NULL it receives the channel interleave
 * factor.  Returns the physical channel number, or -1 when no enabled
 * TAD rule covers the address or the rule mode is unknown.
 */
int
address_to_channel(int node, uint64_t addr, int write, uint64_t *channel_addrp,
    int *interleave_p)
{
	int i;
	int channel = -1;
	uint64_t base;
	uint32_t mapper;
	uint32_t lc;
	int way;

	base = 0;
	for (i = 0; i < MAX_TAD_DRAM_RULE; i++) {
		if (tad[node][i].enable && addr >= base &&
		    addr < tad[node][i].limit) {
			/* Same way selection as the SAD decode */
			switch (tad[node][i].mode) {
			case 0:
				way = (addr >> 6) & 7;
				break;
			case 1:
				way = ((addr >> 6) & 7) ^ ((addr >> 16) & 7);
				break;
			case 2:
				way = ((addr >> 4) & 4) |
				    (((addr >> 6) & 0x3ffffffff) % 3);
				break;
			default:
				return (-1);
			}
			channel = TAD_INTERLEAVE(tad[node][i].pkg_list, way);
			if (channel_addrp) {
				*channel_addrp = channel_address(node, channel,
				    i, addr);
			}
			if (interleave_p)
				*interleave_p = tad[node][i].interleave;
			break;
		}
		base = tad[node][i].limit;
	}
	/*
	 * Convert the logical channel chosen above to a physical one via
	 * the channel mapper (skipped in lockstep, where channel 0 rules).
	 */
	if (!lockstep[node] && channel != -1) {
		mapper = MC_CHANNEL_MAPPER_RD(node);
		lc = CHANNEL_MAP(mapper, channel, write);
		switch (lc) {
		case 1:
			channel = 0;
			break;
		case 2:
			channel = 1;
			break;
		case 4:
			channel = 2;
			break;
		case 3:	/* mirror PCH0 and PCH1 */
			if (!write) {
				/* Reads alternate by an address-bit hash */
				if (((addr >> 24) & 1) ^ ((addr >> 12) & 1) ^
				    ((addr >> 6) & 1))
					channel = 1;
				else
					channel = 0;
			}
			break;
		case 5:	/* sparing PCH0 to PCH2 */
			channel = 0;
			break;
		case 6:	/* sparing PCH1 to PCH2 */
			channel = 1;
			break;
		}
	}
	return (channel);
}

/*
 * Return the number of channels over which `addr` is interleaved,
 * summed over both nodes when the sockets themselves interleave.
 *
 * NOTE(review): if address_to_node() matches no SAD rule it leaves
 * *interleave_p untouched, so `sinterleave` would be read
 * uninitialized (and `node` would be -1) — presumably callers only
 * pass addresses known to decode; confirm.
 */
int
channels_interleave(uint64_t addr)
{
	int node;
	int sinterleave;
	int channels, channels1;

	node = address_to_node(addr, &sinterleave);
	if (sinterleave == 1) {
		channels = 0;
		(void) address_to_channel(node, addr, 0, 0, &channels);
	} else {
		channels = 0;
		channels1 = 0;
		(void) address_to_channel(0, addr, 0, 0, &channels);
		(void) address_to_channel(1, addr, 0, 0, &channels1);
		channels += channels1;
	}
	return (channels);
}


/*
 * Translate a channel address to the DIMM that services it, using the
 * cached RIR rules.  *rank_p receives the rank id and *rank_addr_p the
 * rank-local address (both -1 when nothing matches).  Returns the DIMM
 * number (rank / 4), or -1 when no RIR rule covers the address.
 */
int
caddr_to_dimm(int node, int channel, uint64_t caddr, int *rank_p,
    uint64_t *rank_addr_p)
{
	int i;
	uint64_t base;
	uint64_t rank_addr;
	int rank;
	int dimm;
	int way;

	dimm = -1;
	rank = -1;
	base = 0;
	rank_addr = -1ULL;
	for (i = 0; i < MAX_TAD_DRAM_RULE; i++) {
		if (caddr >= base && caddr < rir[node][channel][i].limit) {
			/*
			 * The rank-interleave way comes from bits 7:6 in
			 * closed-page mode, bits 13:12 in open-page mode;
			 * the offset within the interleave granule is
			 * preserved while the rest is divided by the rank
			 * interleave factor.
			 */
			if (closed_page) {
				way = (caddr >> 6) & 3;
				rank_addr = (((caddr + (int64_t)
				    rir[node][channel][i].way[way].offset *
				    VRANK_SZ) /
				    rir[node][channel][i].interleave) &
				    ~0x3f) + (caddr & 0x3f);
			} else {
				way = (caddr >> 12) & 3;
				rank_addr = (((caddr + (int64_t)
				    rir[node][channel][i].way[way].offset *
				    VRANK_SZ) /
				    rir[node][channel][i].interleave) &
				    ~0xfff) + (caddr & 0xfff);
			}
			rank = rir[node][channel][i].way[way].rank;
			dimm = rank >> 2;	/* 4 ranks per DIMM */
			break;
		}
		base = rir[node][channel][i].limit;
339 } 340 *rank_p = rank; 341 *rank_addr_p = rank_addr; 342 return (dimm); 343 } 344 345 static int 346 socket_interleave(uint64_t addr, int node, int channel, int rule, 347 int *way_p) 348 { 349 int i, j; 350 uint64_t base; 351 uchar_t package; 352 uchar_t xp; 353 uchar_t xc; 354 int ot = 0; 355 int mode; 356 int start; 357 int rt = 1; 358 int found = 0; 359 360 if (mirror_mode[node] || lockstep[node]) 361 channel = 0; 362 package = node + 1; 363 mode = tad[node][rule].mode; 364 base = 0; 365 for (i = 0; i < MAX_SAD_DRAM_RULE; i++) { 366 if (sad[i].enable && addr >= base && addr < sad[i].limit) { 367 if (mode == 2) { 368 for (j = 0; j < INTERLEAVE_NWAY; j++) { 369 xp = SAD_INTERLEAVE(sad[i].node_list, 370 j); 371 if (package != xp) { 372 ot++; 373 if (found) { 374 rt = 2; 375 break; 376 } 377 } else { 378 found = 1; 379 if (ot) { 380 rt = 2; 381 break; 382 } 383 } 384 } 385 } else { 386 if (mode == 2) 387 start = *way_p; 388 else 389 start = 0; 390 for (j = start; j < INTERLEAVE_NWAY; j++) { 391 xp = SAD_INTERLEAVE(sad[i].node_list, 392 j); 393 if (package != xp) { 394 ot++; 395 if (found) { 396 rt = 2; 397 break; 398 } 399 } else if (!found) { 400 xc = TAD_INTERLEAVE( 401 tad[node][rule].pkg_list, 402 j); 403 if (channel == xc) { 404 *way_p = j; 405 if (ot) { 406 rt = 2; 407 break; 408 } 409 found = 1; 410 } 411 } 412 } 413 } 414 break; 415 } 416 base = sad[i].limit; 417 } 418 return (rt); 419 } 420 421 uint64_t 422 dimm_to_addr(int node, int channel, int rank, uint64_t rank_addr, 423 uint64_t *rank_base_p, uint64_t *rank_sz_p, uint32_t *socket_interleave_p, 424 uint32_t *channel_interleave_p, uint32_t *rank_interleave_p, 425 uint32_t *socket_way_p, uint32_t *channel_way_p, uint32_t *rank_way_p) 426 { 427 int i; 428 int way, xway; 429 uint64_t addr; 430 uint64_t caddr; 431 uint64_t cbaddr; 432 uint64_t baddr; 433 uint64_t rlimit; 434 uint64_t rank_sz; 435 uint64_t base; 436 int lchannel; 437 int bits; 438 int no_interleave; 439 int sinterleave; 440 int 
	    cinterleave;
	int rinterleave;
	int found = 0;

	if (lockstep[node] || mirror_mode[node])
		lchannel = 0;
	else
		lchannel = channel;
	addr = -1;
	base = 0;
	/*
	 * Pass 1: invert the RIR decode — find the rule and way that map
	 * this rank/rank_addr and recover the channel address (caddr) and
	 * the channel address of the rank base (cbaddr).
	 */
	for (i = 0; i < MAX_TAD_DRAM_RULE && found == 0; i++) {
		for (way = 0; way < MAX_RIR_WAY; way++) {
			if (rir[node][channel][i].way[way].rank == rank) {
				rlimit = rir[node][channel][i].way[way].rlimit;
				if (rlimit && rank_addr >= rlimit)
					continue;
				if (closed_page) {
					caddr = (rank_addr & ~0x3f) *
					    rir[node][channel][i].interleave -
					    (int64_t)rir[node][channel][i].
					    way[way].offset * VRANK_SZ;
					cbaddr = caddr;
					caddr += way << 6;
					caddr |= rank_addr & 0x3f;
				} else {
					caddr = (rank_addr & ~0xfff) *
					    rir[node][channel][i].interleave -
					    (int64_t)rir[node][channel][i].
					    way[way].offset * VRANK_SZ;
					cbaddr = caddr;
					caddr += way << 12;
					caddr |= rank_addr & 0xfff;
				}
				if (caddr < rir[node][channel][i].limit) {
					rinterleave =
					    rir[node][channel][i].interleave;
					rank_sz = (rir[node][channel][i].limit -
					    base) / rinterleave;
					found = 1;
					if (rank_interleave_p) {
						*rank_interleave_p =
						    rinterleave;
					}
					if (rank_way_p)
						*rank_way_p = way;
					break;
				}
			}
		}
		base = rir[node][channel][i].limit;
	}
	if (!found)
		return (-1ULL);
	base = 0;
	/*
	 * Pass 2: invert the SAG transform (re-insert bits 6/7/8,
	 * multiply by 3, subtract the channel offset) and then undo the
	 * TAD channel interleave to arrive at a system address.
	 */
	for (i = 0; i < MAX_TAD_DRAM_RULE; i++) {
		way = 0;
		if (tad[node][i].enable &&
		    channel_in_interleave(node, channel, i, &way,
		    &no_interleave)) {
			bits = 0;
			addr = caddr;
			baddr = cbaddr;
			if (sag_ch[node][lchannel][i].divby3) {
				addr = (((addr >> 6) * 3) << 6) +
				    (addr & 0x3f);
				baddr = (((baddr >> 6) * 3) << 6);
			}
			if (sag_ch[node][lchannel][i].remove6) {
				bits = 1;
				addr = ((addr & ~0x3f) << 1) | (addr & 0x3f);
				baddr = (baddr & ~0x3f) << 1;
			}
			if (sag_ch[node][lchannel][i].remove7) {
				bits = bits | 2;
				addr = ((addr & ~0x7f) << 1) | (addr & 0x7f);
				baddr = ((baddr & ~0x7f) << 1) | (baddr & 0x40);
			}
			if (sag_ch[node][lchannel][i].remove8) {
				bits = bits | 4;
				addr = ((addr & ~0xff) << 1) | (addr & 0xff);
				baddr = ((baddr & ~0xff) << 1) | (baddr & 0xc0);
			}
			addr -= (int64_t)sag_ch[node][lchannel][i].offset << 16;
			baddr -= (int64_t)
			    sag_ch[node][lchannel][i].offset << 16;
			if (addr < tad[node][i].limit) {
				sinterleave = socket_interleave(addr,
				    node, channel, i, &way);
				if (socket_interleave_p) {
					*socket_interleave_p = sinterleave;
				}
				if (socket_way_p)
					*socket_way_p = way;
				if ((no_interleave && sinterleave == 1) ||
				    mirror_mode[node] || lockstep[node]) {
					cinterleave = 1;
				} else {
					cinterleave = channels_interleave(addr);
				}
				if (channel_interleave_p) {
					*channel_interleave_p = cinterleave;
				}
				/* Clamp the rank size to the TAD limit */
				if (baddr + (rank_sz * rinterleave) >
				    tad[node][i].limit) {
					rank_sz = (tad[node][i].limit - baddr) /
					    (cinterleave * sinterleave *
					    rinterleave);
				}
				if (rank_sz_p) {
					*rank_sz_p = rank_sz;
				}
				if (rank_base_p)
					*rank_base_p = baddr;
				if (channel_way_p)
					*channel_way_p = way;
				if (sinterleave == 1 && no_interleave) {
					/* No interleave: addr is final */
					break;
				}
				/* Fold the interleave way back into addr */
				switch (tad[node][i].mode) {
				case 0:
					addr += way * 0x40;
					break;
				case 1:
					way = (way ^ (addr >> 16)) & bits;
					addr += way * 0x40;
					break;
				case 2:
					/*
					 * Mode 2 (mod-3) decode: derive the
					 * residual way from the forward
					 * decode of the reconstructed addr.
					 */
					if (sinterleave == 1) {
						xway = ((addr >> 4) & 4) |
						    (((addr >> 6) &
						    0x3ffffffff) % 3);
						if (((way - xway) & 3) == 3)
							xway = (way - xway) & 4;
						else
							xway = way - xway;
						switch (xway) {
						case 0:
							way = 0;
							break;
						case 5:
							way = 1;
							break;
						case 2:
							way = 2;
							break;
						case 4:
							way = 3;
							break;
						case 1:
							way = 4;
							break;
						case 6:
							way = 5;
							break;
						}
					} else {
						xway = (way & 3) -
						    (((addr >> 6) &
						    0x3ffffffff) % 3);
						if (xway < 0)
							xway += 3;
						switch (xway) {
						case 0:
							way = 0;
							break;
						case 1:
							way = 1;
							break;
						case 2:
							way = 2;
							break;
						}
					}
					addr += way * 0x40;
					break;
				}
				break;
			}
		}
		base = tad[node][i].limit;
	}
	return (addr);
}

/*
 * cmi_mc_ops entry point: translate a physical address to a unum
 * (board/chip/mc/channel/dimm/rank/offset).  `syndtype` is passed as
 * the write flag to address_to_channel().  Returns CMIERR_UNKNOWN when
 * any stage of the translation fails.
 */
/*ARGSUSED*/
static cmi_errno_t
nhm_patounum(void *arg, uint64_t pa, uint8_t valid_hi, uint8_t valid_lo,
    uint32_t synd, int syndtype, mc_unum_t *unump)
{
	int node;
	int channel;
	int dimm;
	int rank;
	uint64_t caddr, raddr;

	node = address_to_node(pa, 0);
	if (node == -1)
		return (CMIERR_UNKNOWN);
	channel = address_to_channel(node, pa, syndtype, &caddr, 0);
	if (channel == -1)
		return (CMIERR_UNKNOWN);
	dimm = caddr_to_dimm(node, channel, caddr, &rank, &raddr);
	if (dimm == -1)
		return (CMIERR_UNKNOWN);

	unump->unum_board = 0;
	unump->unum_chip = node;
	unump->unum_mc = 0;
	unump->unum_chan = channel;
	unump->unum_cs = dimm;
	unump->unum_rank = rank;
	unump->unum_offset = raddr;

	return (CMI_SUCCESS);
}

/*
 * cmi_mc_ops entry point: translate a unum (or an FM nvlist FMRI) back
 * to a physical address.  With a NULL `unump` the chip/channel/rank
 * and rank offset are extracted from the hc-list and hc-specific
 * members of `nvl`; a stored physical address short-circuits the
 * translation.  Returns CMIERR_UNKNOWN on any lookup or translation
 * failure.
 */
/*ARGSUSED*/
static cmi_errno_t
nhm_unumtopa(void *arg, mc_unum_t *unump, nvlist_t *nvl, uint64_t *pap)
{
	uint64_t pa;
	cmi_errno_t rt;
	int node;
	int channel;
	int rank;
	int i;
	nvlist_t **hcl, *hcsp;
	uint_t npr;
	uint64_t rank_addr;
	char *hcnm, *hcid;
	long v;

	if (unump == NULL) {
		if (nvlist_lookup_nvlist(nvl, FM_FMRI_HC_SPECIFIC,
		    &hcsp) != 0)
			return (CMIERR_UNKNOWN);
		/* Prefer the rank offset; fall back to a raw physaddr */
		if (nvlist_lookup_uint64(hcsp,
		    "asru-" FM_FMRI_HC_SPECIFIC_OFFSET, &rank_addr) != 0 &&
		    nvlist_lookup_uint64(hcsp, FM_FMRI_HC_SPECIFIC_OFFSET,
		    &rank_addr) != 0) {
			if (nvlist_lookup_uint64(hcsp,
			    "asru-" FM_FMRI_HC_SPECIFIC_PHYSADDR, &pa) == 0 ||
			    nvlist_lookup_uint64(hcsp,
			    FM_FMRI_HC_SPECIFIC_PHYSADDR, &pa) == 0) {
				*pap = pa;
				return (CMI_SUCCESS);
			}
			return (CMIERR_UNKNOWN);
		}
		if (nvlist_lookup_nvlist_array(nvl, FM_FMRI_HC_LIST,
		    &hcl, &npr) != 0)
			return (CMIERR_UNKNOWN);
		node = -1;
		channel = -1;
		rank = -1;
		/* Walk the hc-list for chip/dram-channel/rank members */
		for (i = 0; i < npr; i++) {
			if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
			    &hcnm) != 0 ||
			    nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID,
			    &hcid) != 0 ||
			    ddi_strtol(hcid, NULL, 0, &v) != 0)
				return (CMIERR_UNKNOWN);
			if (strcmp(hcnm, "chip") == 0)
				node = (int)v;
			else if (strcmp(hcnm, "dram-channel") == 0)
				channel = (int)v;
			else if (strcmp(hcnm, "rank") == 0)
				rank = (int)v;
		}
		if (node == -1 || channel == -1 || rank == -1)
			return (CMIERR_UNKNOWN);
	} else {
		node = unump->unum_chip;
		channel = unump->unum_chan;
		rank = unump->unum_rank;
		rank_addr = unump->unum_offset;
	}
	pa = dimm_to_addr(node, channel, rank, rank_addr, 0, 0, 0, 0, 0, 0, 0,
	    0);
	if (pa == -1) {
		rt = CMIERR_UNKNOWN;
	} else {
		rt = CMI_SUCCESS;
		*pap = pa;
	}
	return (rt);
}

/* Memory-controller ops vector registered with the CMI framework */
static const cmi_mc_ops_t nhm_mc_ops = {
	nhm_patounum,
	nhm_unumtopa,
	nhm_error_trap	/* cmi_mc_logout */
};

/*
 * cmi_hdl walker callback: register the NHM mc ops on every handle.
 */
/*ARGSUSED*/
int
inhm_mc_register(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
{
	cmi_mc_register(hdl, &nhm_mc_ops, NULL);
	return (CMI_HDL_WALK_NEXT);
}

/*
 * Determine which CPU slots hold Nehalem EP/WS parts.  Returns the
 * first populated slot and stores the exclusive last slot through
 * `lastslot_p`, so callers iterate [first, last).
 */
static int
choose_cpu(int *lastslot_p)
{
	uint32_t id;
	int first;
	int last;

	first = 0;
	last = MAX_CPU_NODES;
	id = CPU_ID_RD(0);
	if (id == NHM_EP_CPU || id == NHM_WS_CPU) {
		id = CPU_ID_RD(1);
		if (id != NHM_EP_CPU && id != NHM_WS_CPU) {
			/* Only slot 0 is a supported CPU */
			last = 1;
		}
	} else {
		/* Slot 0 unsupported; start at slot 1 */
		first = 1;
	}
	*lastslot_p = last;
	return (first);
}

/*
 * Count the distinct socket targets in a packed SAD interleave list.
 */
static int
sad_interleave(uint32_t list)
{
	int rt = 1;
	int i, j;
	int p;

	for (i = 1; i < INTERLEAVE_NWAY; i++) {
		p = SAD_INTERLEAVE(list, i);
		/* Count entry i only if no earlier entry matches it */
		for (j = 0; j < i; j++) {
			if (p == SAD_INTERLEAVE(list, j))
				break;
		}
		if (i == j)
			rt++;
	}
	return (rt);
}

static int
/* Count the distinct channel targets in a packed TAD interleave list */
tad_interleave(uint32_t list)
{
	int rt = 1;
	int i, j;
	int c;

	for (i = 1; i < INTERLEAVE_NWAY; i++) {
		c = TAD_INTERLEAVE(list, i);
		/* Count entry i only if no earlier entry matches it */
		for (j = 0; j < i; j++) {
			if (c == TAD_INTERLEAVE(list, j))
				break;
		}
		if (i == j)
			rt++;
	}
	return (rt);
}

/*
 * Record `rank_addr` as the rank-address limit (rlimit) for every
 * earlier RIR rule/way on this socket/channel that targets `rank` and
 * has no limit recorded yet.  A zero rank_addr records nothing.
 */
static void
set_rank(int socket, int channel, int rule, int way, int rank,
    uint64_t rank_addr)
{
	int k, l;
	if (rank_addr == 0)
		return;
	for (k = 0; k <= rule; k++) {
		for (l = 0; l < way; l++) {
			if (rir[socket][channel][k].way[l].rank == rank &&
			    rir[socket][channel][k].way[l].rlimit == 0) {
				rir[socket][channel][k].way[l].rlimit =
				    rank_addr;
			}
		}
	}
}

/*
 * Read and cache the memory-controller decode registers for all
 * populated CPU slots: SAD rules, per-node RAS/lockstep/mirror and
 * spare-channel state, TAD rules, SAG transforms and RIR rules, plus
 * the global closed-page and ECC flags.  Must run before any of the
 * translation routines above are used.
 */
void
mem_reg_init()
{
	int i, j, k, l, m;
	uint32_t sad_dram_rule;
	uint32_t tad_dram_rule;
	uint32_t mc_ras_enables;
	uint32_t mc_channel_mapping;
	uint32_t sagch;
	uint32_t rir_limit;
	uint32_t rir_way;
	uint32_t mc_control;
	int nhm_slot;
	int nhm_lastslot;
	uint8_t rank;
	uint64_t base;

	nhm_slot = choose_cpu(&nhm_lastslot);

	/* SAD rules are system-wide; read them from the first slot */
	for (i = 0; i < MAX_SAD_DRAM_RULE; i++) {
		sad_dram_rule = SAD_DRAM_RULE_RD(nhm_slot, i);
		sad[i].enable = SAD_DRAM_RULE_ENABLE(sad_dram_rule);
		sad[i].limit = SAD_DRAM_LIMIT(sad_dram_rule);
		sad[i].mode = SAD_DRAM_MODE(sad_dram_rule);
		sad[i].node_list = SAD_INTERLEAVE_LIST_RD(nhm_slot, i);
		sad[i].interleave = sad_interleave(sad[i].node_list);
	}

	for (i = nhm_slot; i < nhm_lastslot; i++) {
		if (MC_CPU_RAS_RD(i) == NHM_CPU_RAS) {
			mc_ras_enables = MC_RAS_ENABLES_RD(i);
			if (RAS_LOCKSTEP_ENABLE(mc_ras_enables))
				lockstep[i] = 1;
			if (RAS_MIRROR_MEM_ENABLE(mc_ras_enables))
				mirror_mode[i] = 1;
		}
		/* Channel 2 unmapped for both reads and writes => spare */
		mc_channel_mapping = MC_CHANNEL_MAPPER_RD(i);
		if (CHANNEL_MAP(mc_channel_mapping, 2, 0) == 0 &&
		    CHANNEL_MAP(mc_channel_mapping, 2, 1) == 0)
			spare_channel[i] = 1;
		for (j = 0; j < MAX_TAD_DRAM_RULE; j++) {
			tad_dram_rule = TAD_DRAM_RULE_RD(i, j);
			tad[i][j].enable = TAD_DRAM_RULE_ENABLE(tad_dram_rule);
			tad[i][j].limit = TAD_DRAM_LIMIT(tad_dram_rule);
			tad[i][j].mode = TAD_DRAM_MODE(tad_dram_rule);
			tad[i][j].pkg_list =
			    TAD_INTERLEAVE_LIST_RD(i, j);
			if (mirror_mode[i] || lockstep[i]) {
				tad[i][j].interleave = 1;
			} else {
				tad[i][j].interleave =
				    tad_interleave(tad[i][j].pkg_list);
				/* Don't count the spare channel */
				if (spare_channel[i] &&
				    tad[i][j].interleave ==
				    CHANNELS_PER_MEMORY_CONTROLLER)
					tad[i][j].interleave--;
			}
		}
		for (j = 0; j < CHANNELS_PER_MEMORY_CONTROLLER; j++) {
			m = 0;	/* running RIR-way register index */
			base = 0;
			for (k = 0; k < MAX_TAD_DRAM_RULE; k++) {
				sagch = MC_SAG_RD(i, j, k);
				sag_ch[i][j][k].offset =
				    CH_ADDRESS_OFFSET(sagch);
				sag_ch[i][j][k].divby3 = DIVBY3(sagch);
				sag_ch[i][j][k].remove6 = REMOVE_6(sagch);
				sag_ch[i][j][k].remove7 = REMOVE_7(sagch);
				sag_ch[i][j][k].remove8 = REMOVE_8(sagch);

				rir_limit = MC_RIR_LIMIT_RD(i, j, k);
				rir[i][j][k].limit = RIR_LIMIT(rir_limit);
				for (l = 0; l < MAX_RIR_WAY; l++) {
					rir_way = MC_RIR_WAY_RD(i, j, m);
					rir[i][j][k].way[l].offset =
					    RIR_OFFSET(rir_way);
					rir[i][j][k].way[l].rank =
					    RIR_RANK(rir_way);
					rir[i][j][k].way[l].rlimit = 0;
					m++;
				}
				/*
				 * Derive the rank interleave factor from
				 * how many ways share way 0's rank: all 4
				 * same => 1-way, some same => 2-way,
				 * all different => 4-way.
				 */
				rank = rir[i][j][k].way[0].rank;
				if (rank == rir[i][j][k].way[1].rank &&
				    rank == rir[i][j][k].way[2].rank &&
				    rank == rir[i][j][k].way[3].rank) {
					rir[i][j][k].interleave = 1;
				} else if (rank == rir[i][j][k].way[1].rank ||
				    rank == rir[i][j][k].way[2].rank ||
				    rank == rir[i][j][k].way[3].rank) {
					rir[i][j][k].interleave = 2;
				} else {
					rir[i][j][k].interleave = 4;
				}
				for (l = 0; l < MAX_RIR_WAY; l++) {
					set_rank(i, j, k, l,
					    rir[i][j][k].way[l].rank,
					    ((rir[i][j][k].way[l].offset +
					    base) /
					    rir[i][j][k].interleave));
				}
				base = rir[i][j][k].limit;
			}
		}
	}
	mc_control = MC_CONTROL_RD(nhm_slot);
	closed_page = MC_CONTROL_CLOSED_PAGE(mc_control);
	/* EP parts report ECC via MC_CONTROL, WS parts via MC_STATUS */
	if (MC_CPU_RAS_RD(nhm_slot) == NHM_CPU_RAS)
		ecc_enabled = MC_CONTROL_ECCEN(mc_control);
	else if ((MC_STATUS_RD(nhm_slot) & WS_ECC_ENABLED) != 0)
		ecc_enabled = 1;
}