/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/fm/protocol.h>
#include <sys/cpu_module_impl.h>
#include <sys/mc_intel.h>
#include "intel_nhm.h"
#include "nhm_log.h"
#include "mem_addr.h"

char closed_page;
char ecc_enabled;
char divby3_enabled;
char lockstep[2];
char mirror_mode[2];
char spare_channel[2];
sad_t sad[MAX_SAD_DRAM_RULE];
tad_t tad[MAX_CPU_NODES][MAX_TAD_DRAM_RULE];
sag_ch_t sag_ch[MAX_CPU_NODES][CHANNELS_PER_MEMORY_CONTROLLER]
    [MAX_TAD_DRAM_RULE];
rir_t rir[MAX_CPU_NODES][CHANNELS_PER_MEMORY_CONTROLLER]
    [MAX_TAD_DRAM_RULE];
dod_t dod_reg[MAX_CPU_NODES][CHANNELS_PER_MEMORY_CONTROLLER]
    [MAX_DIMMS_PER_CHANNEL];

/*
 * Check whether the given channel takes part in the interleave of a
 * TAD rule, returning its interleave way and whether the rule does
 * not actually interleave across channels.
 */
static int
channel_in_interleave(int node, int channel, int rule, int *way_p,
    int *no_interleave_p)
{
	int way;
	int c;
	int i;
	uint32_t mc_channel_mapper;
	int lc;
	int rt = 0;
	int start = 0;

	if (lockstep[node] || mirror_mode[node]) {
		*no_interleave_p = 0;
		if (channel > 1)
			return (0);
		else
			return (1);
	}
	mc_channel_mapper = MC_CHANNEL_MAPPER_RD(node);
	lc = -1;
	c = 1 << channel;
	for (i = 0; i < CHANNELS_PER_MEMORY_CONTROLLER; i++) {
		if ((CHANNEL_MAP(mc_channel_mapper, i, 0) & c) != 0) {
			lc = i;
			break;
		}
	}
	if (lc == -1) {
		for (i = 0; i < CHANNELS_PER_MEMORY_CONTROLLER; i++) {
			if ((CHANNEL_MAP(mc_channel_mapper, i, 1) & c) != 0) {
				lc = i;
				break;
			}
		}
	}
	if (lc == -1) {
		return (0);
	}
	*way_p = 0;
	*no_interleave_p = 0;
	if (node && tad[node][rule].mode == 2)
		start = 4;
	for (way = start; way < INTERLEAVE_NWAY; way++) {
		if (lc == TAD_INTERLEAVE(tad[node][rule].pkg_list, way)) {
			*way_p = way;
			if (way == 0) {
				for (i = way + 1; i < INTERLEAVE_NWAY; i++) {
					c = TAD_INTERLEAVE(
					    tad[node][rule].pkg_list, i);
					if (lc != c) {
						break;
					}
				}
				if (i == INTERLEAVE_NWAY)
					*no_interleave_p = 1;
			}
			rt = 1;
			break;
		}
	}
	return (rt);
}

/*
 * Walk the SAD rules to find the node (socket) that decodes a system
 * address, optionally returning the socket interleave factor.
 */
int
address_to_node(uint64_t addr, int *interleave_p)
{
	int i;
	int node = -1;
	uint64_t base;
	int way;
	uchar_t package;

	base = 0;
	for (i = 0; i < MAX_SAD_DRAM_RULE; i++) {
		if (sad[i].enable && addr >= base && addr < sad[i].limit) {
			switch (sad[i].mode) {
			case 0:
				way = (addr >> 6) & 7;
				break;
			case 1:
				way = ((addr >> 6) & 7) ^ ((addr >> 16) & 7);
				break;
			case 2:
				way = ((addr >> 4) & 4) |
				    (((addr >> 6) & 0x3ffffffff) % 3);
				break;
			default:
				return (-1);
			}
			package = SAD_INTERLEAVE(sad[i].node_list, way);
			if (interleave_p)
				*interleave_p = sad[i].interleave;
			if (package == 1)
				node = 0;
			else if (package == 2)
				node = 1;
			else
				node = -1;
			break;
		}
		base = sad[i].limit;
	}
	return (node);
}

/*
 * Convert a system address to a channel address by applying the
 * per-channel address generation (SAG) rule: offset, remove6/7/8
 * and divide-by-3.
 */
static uint64_t
channel_address(int node, int channel, int rule, uint64_t addr)
{
	uint64_t caddr;

	if (lockstep[node] || mirror_mode[node])
		channel = 0;
	caddr = (((addr >> 16) +
	    (int64_t)sag_ch[node][channel][rule].soffset) << 16) |
	    (addr & 0xffc0);
	if (sag_ch[node][channel][rule].remove8) {
		caddr = ((caddr >> 1) & ~0xff) | (caddr & 0xff);
	}
	if (sag_ch[node][channel][rule].remove7) {
		caddr = ((caddr >> 1) & ~0x7f) | (caddr & 0x7f);
	}
	if (sag_ch[node][channel][rule].remove6) {
		caddr = ((caddr >> 1) & ~0x3f) | (caddr & 0x3f);
	}
	caddr = caddr & 0x1fffffffff;
	if (sag_ch[node][channel][rule].divby3) {
		caddr = ((((caddr >> 6) / 3) << 6) & 0x1fffffffc0) |
		    (caddr & 0x3f);
	}
	return (caddr);
}

/*
 * Walk the TAD rules on a node to find the physical channel that
 * services a system address, optionally returning the logical channel,
 * the channel address and the channel interleave factor.
 */
int
address_to_channel(int node, uint64_t addr, int write,
    int *log_chan, uint64_t *channel_addrp, int *interleave_p)
{
	int i;
	int channel = -1;
	uint64_t base;
	uint32_t mapper;
	uint32_t lc;
	int way;

	base = 0;
	for (i = 0; i < MAX_TAD_DRAM_RULE; i++) {
		if (tad[node][i].enable && addr >= base &&
		    addr < tad[node][i].limit) {
			switch (tad[node][i].mode) {
			case 0:
				way = (addr >> 6) & 7;
				break;
			case 1:
				way = ((addr >> 6) & 7) ^ ((addr >> 16) & 7);
				break;
			case 2:
				way = ((addr >> 4) & 4) |
				    (((addr >> 6) & 0x3ffffffff) % 3);
				break;
			default:
				return (-1);
			}
			/* get logical channel number */
			channel = TAD_INTERLEAVE(tad[node][i].pkg_list, way);
			if (log_chan)
				*log_chan = channel;

			if (channel_addrp) {
				*channel_addrp = channel_address(node,
				    channel, i, addr);
			}
			if (interleave_p)
				*interleave_p = tad[node][i].interleave;
			break;
		}
		base = tad[node][i].limit;
	}
	if (!lockstep[node] && channel != -1) {
		mapper = MC_CHANNEL_MAPPER_RD(node);
		lc = CHANNEL_MAP(mapper, channel, write);
		switch (lc) {
		case 1:
			channel = 0;
			break;
		case 2:
			channel = 1;
			break;
		case 4:
			channel = 2;
			break;
		case 3:	/* mirror PCH0 and PCH1 */
			if (!write) {
				if (((addr >> 24) & 1) ^ ((addr >> 12) & 1) ^
				    ((addr >> 6) & 1))
					channel = 1;
				else
					channel = 0;
			}
			break;
		case 5:	/* sparing PCH0 to PCH2 */
			channel = 0;
			break;
		case 6:	/* sparing PCH1 to PCH2 */
			channel = 1;
			break;
		}
	}
	return (channel);
}

/*
 * Return the channel interleave factor for a system address, summed
 * across both nodes when the address is socket interleaved.
 */
int
channels_interleave(uint64_t addr)
{
	int node;
	int sinterleave;
	int channels, channels1;

	node = address_to_node(addr, &sinterleave);
	if (sinterleave == 1) {
		channels = 0;
		(void) address_to_channel(node, addr, 0, 0, 0, &channels);
	} else {
		channels = 0;
		channels1 = 0;
		(void) address_to_channel(0, addr, 0, 0, 0, &channels);
		(void) address_to_channel(1, addr, 0, 0, 0, &channels1);
		channels += channels1;
	}
	return (channels);
}

/*
 * Walk the rank interleave (RIR) rules to convert a channel address to
 * a DIMM number, rank and rank address.
 */
int
channel_addr_to_dimm(int node, int channel, uint64_t caddr, int *rank_p,
    uint64_t *rank_addr_p)
{
	int i;
	uint64_t base;
	uint64_t rank_addr;
	int rank;
	int dimm;
	int way;

	dimm = -1;
	rank = -1;
	base = 0;
	rank_addr = -1ULL;
	for (i = 0; i < MAX_TAD_DRAM_RULE; i++) {
		if (caddr >= base && caddr < rir[node][channel][i].limit) {
			if (closed_page) {
				way = (caddr >> 6) & 3;
				rank_addr = (((caddr + (int64_t)
				    rir[node][channel][i].way[way].offset *
				    VRANK_SZ) /
				    rir[node][channel][i].interleave) &
				    ~0x3f) + (caddr & 0x3f);
			} else {
				way = (caddr >> 12) & 3;
				rank_addr = (((caddr + (int64_t)
				    rir[node][channel][i].way[way].offset *
				    VRANK_SZ) /
				    rir[node][channel][i].interleave) &
				    ~0xfff) + (caddr & 0xfff);
			}
			rank = rir[node][channel][i].way[way].rank;
			dimm = rank >> 2;
			break;
		}
		base = rir[node][channel][i].limit;
	}
	*rank_p = rank;
	*rank_addr_p = rank_addr;
	return (dimm);
}

/*
 * Determine whether a system address is interleaved across sockets,
 * returning 1 (no socket interleave) or 2, and the interleave way.
 */
static int
socket_interleave(uint64_t addr, int node, int channel, int rule,
    int *way_p)
{
	int i, j;
	uint64_t base;
	uchar_t package;
	uchar_t xp;
	uchar_t xc;
	int ot = 0;
	int mode;
	int start;
	int rt = 1;
	int found = 0;

	if (mirror_mode[node] || lockstep[node])
		channel = 0;
	package = node + 1;
	mode = tad[node][rule].mode;
	base = 0;
	for (i = 0; i < MAX_SAD_DRAM_RULE; i++) {
		if (sad[i].enable && addr >= base && addr < sad[i].limit) {
			if (mode == 2) {
				for (j = 0; j < INTERLEAVE_NWAY; j++) {
					xp = SAD_INTERLEAVE(sad[i].node_list,
					    j);
					if (package != xp) {
						ot++;
						if (found) {
							rt = 2;
							break;
						}
					} else {
						found = 1;
						if (ot) {
							rt = 2;
							break;
						}
					}
				}
			} else {
				if (mode == 2)
					start = *way_p;
				else
					start = 0;
				for (j = start; j < INTERLEAVE_NWAY; j++) {
					xp = SAD_INTERLEAVE(sad[i].node_list,
					    j);
					if (package != xp) {
						ot++;
						if (found) {
							rt = 2;
							break;
						}
					} else if (!found) {
						xc = TAD_INTERLEAVE(
						    tad[node][rule].pkg_list,
						    j);
						if (channel == xc) {
							*way_p = j;
							if (ot) {
								rt = 2;
								break;
							}
							found = 1;
						}
					}
				}
			}
			break;
		}
		base = sad[i].limit;
	}
	return (rt);
}

/*
 * Reverse translation: convert a node/channel/rank and rank address
 * back to a system address, optionally returning the rank base, rank
 * size and the interleave factors and ways used along the way.
 */
uint64_t
dimm_to_addr(int node, int channel, int rank, uint64_t rank_addr,
    uint64_t *rank_base_p, uint64_t *rank_sz_p, uint32_t *socket_interleave_p,
    uint32_t *channel_interleave_p, uint32_t *rank_interleave_p,
    uint32_t *socket_way_p, uint32_t *channel_way_p, uint32_t *rank_way_p)
{
	int i;
	int way, xway;
	uint64_t addr;
	uint64_t caddr;
	uint64_t cbaddr;
	uint64_t baddr;
	uint64_t rlimit;
	uint64_t rank_sz;
	uint64_t base;
	int lchannel;
	int bits;
	int no_interleave;
	int sinterleave;
	int cinterleave;
	int rinterleave;
	int found = 0;

	if (lockstep[node] || mirror_mode[node])
		lchannel = 0;
	else
		lchannel = channel;
	addr = -1;
	base = 0;
	for (i = 0; i < MAX_TAD_DRAM_RULE && found == 0; i++) {
		for (way = 0; way < MAX_RIR_WAY; way++) {
			if (rir[node][channel][i].way[way].dimm_rank == rank) {
				rlimit = rir[node][channel][i].way[way].rlimit;
				if (rlimit && rank_addr >= rlimit)
					continue;
				if (closed_page) {
					caddr = (rank_addr & ~0x3f) *
					    rir[node][channel][i].interleave -
					    (int64_t)rir[node][channel][i].
					    way[way].soffset * VRANK_SZ;
					cbaddr = caddr;
					caddr += way << 6;
					caddr |= rank_addr & 0x3f;
				} else {
					caddr = (rank_addr & ~0xfff) *
					    rir[node][channel][i].interleave -
					    (int64_t)rir[node][channel][i].
					    way[way].soffset * VRANK_SZ;
					cbaddr = caddr;
					caddr += way << 12;
					caddr |= rank_addr & 0xfff;
				}
				if (caddr < rir[node][channel][i].limit) {
					rinterleave =
					    rir[node][channel][i].interleave;
					rank_sz = (rir[node][channel][i].limit -
					    base) / rinterleave;
					found = 1;
					if (rank_interleave_p) {
						*rank_interleave_p =
						    rinterleave;
					}
					if (rank_way_p)
						*rank_way_p = way;
					break;
				}
			}
		}
		base = rir[node][channel][i].limit;
	}
	if (!found)
		return (-1ULL);
	base = 0;
	for (i = 0; i < MAX_TAD_DRAM_RULE; i++) {
		way = 0;
		if (tad[node][i].enable &&
		    channel_in_interleave(node, channel, i, &way,
		    &no_interleave)) {
			bits = 0;
			addr = caddr;
			baddr = cbaddr;
			if (sag_ch[node][lchannel][i].divby3) {
				addr = (((addr >> 6) * 3) << 6) +
				    (addr & 0x3f);
				baddr = (((baddr >> 6) * 3) << 6);
			}
			if (sag_ch[node][lchannel][i].remove6) {
				bits = 1;
				addr = ((addr & ~0x3f) << 1) | (addr & 0x3f);
				baddr = (baddr & ~0x3f) << 1;
			}
			if (sag_ch[node][lchannel][i].remove7) {
				bits = bits | 2;
				addr = ((addr & ~0x7f) << 1) | (addr & 0x7f);
				baddr = ((baddr & ~0x7f) << 1) | (baddr & 0x40);
			}
			if (sag_ch[node][lchannel][i].remove8) {
				bits = bits | 4;
				addr = ((addr & ~0xff) << 1) | (addr & 0xff);
				baddr = ((baddr & ~0xff) << 1) | (baddr & 0xc0);
			}
			addr -= (int64_t)sag_ch[node][lchannel][i].soffset <<
			    16;
			baddr -= (int64_t)
			    sag_ch[node][lchannel][i].soffset << 16;
			if (addr < tad[node][i].limit) {
				sinterleave = socket_interleave(addr,
				    node, channel, i, &way);
				if (socket_interleave_p) {
					*socket_interleave_p = sinterleave;
				}
				if (socket_way_p)
					*socket_way_p = way;
				if ((no_interleave && sinterleave == 1) ||
				    mirror_mode[node] || lockstep[node]) {
					cinterleave = 1;
				} else {
					cinterleave = channels_interleave(addr);
				}
				if (channel_interleave_p) {
					*channel_interleave_p = cinterleave;
				}
				if (baddr + (rank_sz * rinterleave) >
				    tad[node][i].limit) {
					rank_sz = (tad[node][i].limit - baddr) /
					    (cinterleave * sinterleave *
					    rinterleave);
				}
				if (rank_sz_p) {
					*rank_sz_p = rank_sz;
				}
				if (rank_base_p)
					*rank_base_p = baddr;
				if (channel_way_p)
					*channel_way_p = way;
				if (sinterleave == 1 && no_interleave) {
					break;
				}
				switch (tad[node][i].mode) {
				case 0:
					addr += way * 0x40;
					break;
				case 1:
					way = (way ^ (addr >> 16)) & bits;
					addr += way * 0x40;
					break;
				case 2:
					if (sinterleave == 1) {
						xway = ((addr >> 4) & 4) |
						    (((addr >> 6) &
						    0x3ffffffff) % 3);
						if (((way - xway) & 3) == 3)
							xway = (way - xway) & 4;
						else
							xway = way - xway;
						switch (xway) {
						case 0:
							way = 0;
							break;
						case 5:
							way = 1;
							break;
						case 2:
							way = 2;
							break;
						case 4:
							way = 3;
							break;
						case 1:
							way = 4;
							break;
						case 6:
							way = 5;
							break;
						}
					} else {
						xway = (way & 3) -
						    (((addr >> 6) &
						    0x3ffffffff) % 3);
						if (xway < 0)
							xway += 3;
						switch (xway) {
						case 0:
							way = 0;
							break;
						case 1:
							way = 1;
							break;
						case 2:
							way = 2;
							break;
						}
					}
					addr += way * 0x40;
					break;
				}
				break;
			}
		}
		base = tad[node][i].limit;
	}
	return (addr);
}

/*ARGSUSED*/
static cmi_errno_t
nhm_patounum(void *arg, uint64_t pa, uint8_t valid_hi, uint8_t valid_lo,
    uint32_t synd, int syndtype, mc_unum_t *unump)
{
	int node;
	int channel;
	int dimm;
	int rank;
	int log_chan;
	uint64_t bank, row, column;
	uint64_t caddr, raddr;

	node = address_to_node(pa, 0);
	if (node == -1) {
		return (CMIERR_UNKNOWN);
	}
	channel = address_to_channel(node, pa, syndtype, &log_chan, &caddr, 0);
	if (channel == -1) {
		return (CMIERR_UNKNOWN);
	}
	/*
	 * If the driver was built with the closed tree present, the Intel
	 * proprietary functions caddr_to_dimm and rankaddr_to_dimm are
	 * available for finding the dimm/bank/row/column address; otherwise
	 * we just locate the dimm and offset.
	 */
	if (&caddr_to_dimm)
		dimm = caddr_to_dimm(node, log_chan, caddr, &rank, &raddr);
	else
		dimm = channel_addr_to_dimm(node, log_chan, caddr, &rank,
		    &raddr);
	if (dimm == -1) {
		return (CMIERR_UNKNOWN);
	}
	unump->unum_board = 0;
	unump->unum_chip = node;
	unump->unum_mc = 0;
	unump->unum_chan = channel;
	unump->unum_cs = dimm;
	unump->unum_rank = rank;

	if (&rankaddr_to_dimm) {
		if (rankaddr_to_dimm(raddr, node, channel, dimm, 0, &bank, &row,
		    &column) != DDI_SUCCESS) {
			return (CMIERR_UNKNOWN);
		}
		unump->unum_offset = TCODE_OFFSET(rank, bank, row, column);
	} else {
		unump->unum_offset = raddr;
	}

	return (CMI_SUCCESS);
}

/*
 * CMI unumtopa entry point: translate a unum or an hc-scheme FMRI back
 * to a physical address.
 */
/*ARGSUSED*/
static cmi_errno_t
nhm_unumtopa(void *arg, mc_unum_t *unump, nvlist_t *nvl, uint64_t *pap)
{
	uint64_t pa;
	cmi_errno_t rt;
	int node;
	int channel;
	int log_chan;
	int rank;
	int i;
	nvlist_t **hcl, *hcsp;
	uint_t npr;
	uint64_t offset;
	char *hcnm, *hcid;
	long v;
	uint64_t row, bank, col;
	int dimm;
	uint64_t rank_addr;

	if (unump == NULL) {
		if (nvlist_lookup_nvlist(nvl, FM_FMRI_HC_SPECIFIC,
		    &hcsp) != 0)
			return (CMIERR_UNKNOWN);
		if (nvlist_lookup_uint64(hcsp,
		    "asru-" FM_FMRI_HC_SPECIFIC_OFFSET, &offset) != 0 &&
		    nvlist_lookup_uint64(hcsp, FM_FMRI_HC_SPECIFIC_OFFSET,
		    &offset) != 0) {
			if (nvlist_lookup_uint64(hcsp,
			    "asru-" FM_FMRI_HC_SPECIFIC_PHYSADDR, &pa) == 0 ||
			    nvlist_lookup_uint64(hcsp,
			    FM_FMRI_HC_SPECIFIC_PHYSADDR, &pa) == 0) {
				*pap = pa;
				return (CMI_SUCCESS);
			}
			return (CMIERR_UNKNOWN);
		}
		if (nvlist_lookup_nvlist_array(nvl, FM_FMRI_HC_LIST,
		    &hcl, &npr) != 0)
			return (CMIERR_UNKNOWN);
		node = -1;
		channel = -1;
		dimm = -1;
		rank = -1;
		for (i = 0; i < npr; i++) {
			if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
			    &hcnm) != 0 ||
			    nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID,
			    &hcid) != 0 ||
			    ddi_strtol(hcid, NULL, 0, &v) != 0)
				return (CMIERR_UNKNOWN);
			if (strcmp(hcnm, "chip") == 0)
				node = (int)v;
			else if (strcmp(hcnm, "dram-channel") == 0)
				channel = (int)v;
			else if (strcmp(hcnm, "dimm") == 0)
				dimm = (int)v;
			else if (strcmp(hcnm, "rank") == 0)
				rank = (int)v;
		}
		if (node == -1 || channel == -1 || dimm == -1 || rank == -1)
			return (CMIERR_UNKNOWN);
	} else {
		node = unump->unum_chip;
		channel = unump->unum_chan;
		rank = unump->unum_rank;
		dimm = unump->unum_cs;
		offset = unump->unum_offset;
	}

	/*
	 * If the driver was built with the closed tree present, the Intel
	 * proprietary function dimm_to_rankaddr is available for finding
	 * the physical address.
	 */
	if (&dimm_to_rankaddr && (offset & OFFSET_ROW_BANK_COL) != 0) {
		row = TCODE_OFFSET_RAS(offset);
		bank = TCODE_OFFSET_BANK(offset);
		col = TCODE_OFFSET_CAS(offset);
		rank_addr = dimm_to_rankaddr(node, channel, dimm, row,
		    bank, col, &log_chan);
		pa = rankaddr_to_phyaddr(node, log_chan, dimm, rank,
		    rank_addr);
	} else if ((offset & OFFSET_ROW_BANK_COL) == 0) {
		pa = dimm_to_addr(node, channel, rank, offset, 0, 0, 0, 0, 0,
		    0, 0, 0);
	} else {
		pa = -1LL;
	}

	if (pa == -1) {
		rt = CMIERR_UNKNOWN;
	} else {
		rt = CMI_SUCCESS;
		*pap = pa;
	}
	return (rt);
}

static const cmi_mc_ops_t nhm_mc_ops = {
	nhm_patounum,
	nhm_unumtopa,
	nhm_error_trap	/* cmi_mc_logout */
};

/*ARGSUSED*/
int
inhm_mc_register(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
{
	cmi_mc_register(hdl, &nhm_mc_ops, NULL);
	return (CMI_HDL_WALK_NEXT);
}

/*
 * Determine which CPU slots are populated: return the first slot to
 * scan and set *lastslot_p to the slot past the last one.
 */
static int
choose_cpu(int *lastslot_p)
{
	uint32_t id;
	int first;
	int last;

	first = 0;
	last = MAX_CPU_NODES;
	id = CPU_ID_RD(0);
	if (id == NHM_EP_CPU || id == NHM_WS_CPU || id == NHM_JF_CPU ||
	    id == NHM_WM_CPU) {
		id = CPU_ID_RD(1);
		if (id != NHM_EP_CPU && id != NHM_WS_CPU && id != NHM_JF_CPU &&
		    id != NHM_WM_CPU) {
			last = 1;
		}
	} else {
		first = 1;
	}
	*lastslot_p = last;
	return (first);
}

/* Count the distinct targets in a SAD interleave list. */
static int
sad_interleave(uint32_t list)
{
	int rt = 1;
	int i, j;
	int p;

	for (i = 1; i < INTERLEAVE_NWAY; i++) {
		p = SAD_INTERLEAVE(list, i);
		for (j = 0; j < i; j++) {
			if (p == SAD_INTERLEAVE(list, j))
				break;
		}
		if (i == j)
			rt++;
	}
	return (rt);
}

/* Count the distinct targets in a TAD interleave list. */
static int
tad_interleave(uint32_t list)
{
	int rt = 1;
	int i, j;
	int c;

	for (i = 1; i < INTERLEAVE_NWAY; i++) {
		c = TAD_INTERLEAVE(list, i);
		for (j = 0; j < i; j++) {
			if (c == TAD_INTERLEAVE(list, j))
				break;
		}
		if (i == j)
			rt++;
	}
	return (rt);
}

/*
 * Record the rank address limit on earlier RIR ways that map the same
 * DIMM rank and do not yet have a limit set.
 */
static void
set_rank(int socket, int channel, int rule, int way, int rank,
    uint64_t rank_addr)
{
	int k, l;

	if (rank_addr == 0)
		return;
	for (k = 0; k <= rule; k++) {
		for (l = 0; l < way; l++) {
			if (rir[socket][channel][k].way[l].dimm_rank == rank &&
			    rir[socket][channel][k].way[l].rlimit == 0) {
				rir[socket][channel][k].way[l].rlimit =
				    rank_addr;
			}
		}
	}
}

/*
 * Read and cache the memory controller decode registers (SAD, TAD,
 * SAG, RIR and DOD) used for address translation.
 */
void
mem_reg_init()
{
	int i, j, k, l, m;
	uint32_t sad_dram_rule;
	uint32_t tad_dram_rule;
	uint32_t mc_ras_enables;
	uint32_t mc_channel_mapping;
	uint32_t sagch;
	uint32_t rir_limit;
	uint32_t rir_way;
	uint32_t mc_control;
	uint32_t id;
	int nhm_slot;
	int nhm_lastslot;
	uint8_t rank;
	uint64_t base;
	int ras_dev = 0;
	uint32_t dod_value;

	nhm_slot = choose_cpu(&nhm_lastslot);

	for (i = 0; i < MAX_SAD_DRAM_RULE; i++) {
		sad_dram_rule = SAD_DRAM_RULE_RD(nhm_slot, i);
		sad[i].enable = SAD_DRAM_RULE_ENABLE(sad_dram_rule);
		sad[i].limit = SAD_DRAM_LIMIT(sad_dram_rule);
		sad[i].mode = SAD_DRAM_MODE(sad_dram_rule);
		sad[i].node_list = SAD_INTERLEAVE_LIST_RD(nhm_slot, i);
		sad[i].interleave = sad_interleave(sad[i].node_list);
		for (j = 0; j < INTERLEAVE_NWAY; j++) {
			sad[i].node_tgt[j] = (sad[i].node_list >>
			    (j * 4)) & 0x3;
		}
	}

	for (i = nhm_slot; i < nhm_lastslot; i++) {
		id = MC_CPU_RAS_RD(i);
		if (id == NHM_CPU_RAS || id == NHM_JF_CPU_RAS ||
		    id == NHM_WM_CPU_RAS) {
			ras_dev = 1;
			mc_ras_enables = MC_RAS_ENABLES_RD(i);
			if (RAS_LOCKSTEP_ENABLE(mc_ras_enables))
				lockstep[i] = 1;
			if (RAS_MIRROR_MEM_ENABLE(mc_ras_enables))
				mirror_mode[i] = 1;
		}
		mc_channel_mapping = MC_CHANNEL_MAPPER_RD(i);
		if (CHANNEL_MAP(mc_channel_mapping, 2, 0) == 0 &&
		    CHANNEL_MAP(mc_channel_mapping, 2, 1) == 0)
			spare_channel[i] = 1;
		for (j = 0; j < MAX_TAD_DRAM_RULE; j++) {
			tad_dram_rule = TAD_DRAM_RULE_RD(i, j);
			tad[i][j].enable = TAD_DRAM_RULE_ENABLE(tad_dram_rule);
			tad[i][j].limit = TAD_DRAM_LIMIT(tad_dram_rule);
			tad[i][j].mode = TAD_DRAM_MODE(tad_dram_rule);
			tad[i][j].pkg_list =
			    TAD_INTERLEAVE_LIST_RD(i, j);
			for (k = 0; k < INTERLEAVE_NWAY; k++) {
				tad[i][j].pkg_tgt[k] = ((tad[i][j].pkg_list >>
				    (k * 4)) & 0x3);
			}
			if (mirror_mode[i] || lockstep[i]) {
				tad[i][j].interleave = 1;
			} else {
				tad[i][j].interleave =
				    tad_interleave(tad[i][j].pkg_list);
				if (spare_channel[i] &&
				    tad[i][j].interleave ==
				    CHANNELS_PER_MEMORY_CONTROLLER)
					tad[i][j].interleave--;
			}
		}
		for (j = 0; j < CHANNELS_PER_MEMORY_CONTROLLER; j++) {
			m = 0;
			base = 0;
			for (k = 0; k < MAX_TAD_DRAM_RULE; k++) {
				sagch = MC_SAG_RD(i, j, k);
				sag_ch[i][j][k].offset =
				    CH_ADDRESS_OFFSET(sagch);
				sag_ch[i][j][k].soffset =
				    CH_ADDRESS_SOFFSET(sagch);
				sag_ch[i][j][k].divby3 = DIVBY3(sagch);
				sag_ch[i][j][k].remove6 = REMOVE_6(sagch);
				sag_ch[i][j][k].remove7 = REMOVE_7(sagch);
				sag_ch[i][j][k].remove8 = REMOVE_8(sagch);

				rir_limit = MC_RIR_LIMIT_RD(i, j, k);
				rir[i][j][k].limit = RIR_LIMIT(rir_limit);
				for (l = 0; l < MAX_RIR_WAY; l++) {
					rir_way = MC_RIR_WAY_RD(i, j, m);
					rir[i][j][k].way[l].offset =
					    RIR_OFFSET(rir_way);
					rir[i][j][k].way[l].soffset =
					    RIR_SOFFSET(rir_way);
					rir[i][j][k].way[l].rank =
					    RIR_RANK(rir_way);
					rir[i][j][k].way[l].dimm =
					    RIR_DIMM(rir_way);
					rir[i][j][k].way[l].dimm_rank =
					    RIR_DIMM_RANK(rir_way);
					rir[i][j][k].way[l].rlimit = 0;
					m++;
				}
				rank = rir[i][j][k].way[0].dimm_rank;
				if (rank == rir[i][j][k].way[1].dimm_rank &&
				    rank == rir[i][j][k].way[2].dimm_rank &&
				    rank == rir[i][j][k].way[3].dimm_rank) {
					rir[i][j][k].interleave = 1;
				} else if
				    (rank == rir[i][j][k].way[1].dimm_rank ||
				    rank == rir[i][j][k].way[2].dimm_rank ||
				    rank == rir[i][j][k].way[3].dimm_rank) {
					rir[i][j][k].interleave = 2;
				} else {
					rir[i][j][k].interleave = 4;
				}
				for (l = 0; l < MAX_RIR_WAY; l++) {
					set_rank(i, j, k, l,
					    rir[i][j][k].way[l].dimm_rank,
					    ((rir[i][j][k].way[l].soffset +
					    base) /
					    rir[i][j][k].interleave));
				}
				base = rir[i][j][k].limit;
			}
			for (k = 0; k < MAX_DIMMS_PER_CHANNEL; k++) {
				dod_value = MC_DOD_RD(i, j, k);
				dod_reg[i][j][k].NUMCol = NUMCOL(dod_value);
				dod_reg[i][j][k].NUMRow = NUMROW(dod_value);
				dod_reg[i][j][k].NUMBank = NUMBANK(dod_value);
				dod_reg[i][j][k].NUMRank = NUMRANK(dod_value);
				dod_reg[i][j][k].DIMMPresent =
				    DIMMPRESENT(dod_value);
				dod_reg[i][j][k].RankOffset =
				    RANKOFFSET(dod_value);
			}
		}
	}
	mc_control = MC_CONTROL_RD(nhm_slot);
	closed_page = MC_CONTROL_CLOSED_PAGE(mc_control);
	if (ras_dev)
		ecc_enabled = MC_CONTROL_ECCEN(mc_control);
	else if ((MC_STATUS_RD(nhm_slot) & WS_ECC_ENABLED) != 0)
		ecc_enabled = 1;
	divby3_enabled = MC_CONTROL_DIVBY3(mc_control);
}