1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/types.h>
28 #include <sys/time.h>
29 #include <sys/fm/protocol.h>
30 #include <sys/cpu_module_impl.h>
31 #include <sys/mc_intel.h>
32 #include "intel_nhm.h"
33 #include "nhm_log.h"
34 #include "mem_addr.h"
35
/*
 * Cached copies of the memory-controller configuration registers,
 * captured once by mem_reg_init().  Indexed by CPU node (socket)
 * and, where applicable, by channel and DRAM rule.
 */
char closed_page;		/* MC operates in closed-page mode */
char ecc_enabled;		/* ECC enabled on the memory controller */
char divby3_enabled;		/* divide-by-3 channel addressing in use */
char lockstep[2];		/* per-node: channels run in lockstep */
char mirror_mode[2];		/* per-node: memory mirroring enabled */
char spare_channel[2];		/* per-node: channel 2 reserved as spare */
sad_t sad[MAX_SAD_DRAM_RULE];	/* source address decoder (socket) rules */
tad_t tad[MAX_CPU_NODES][MAX_TAD_DRAM_RULE];	/* target address decoder */
sag_ch_t sag_ch[MAX_CPU_NODES][CHANNELS_PER_MEMORY_CONTROLLER]
    [MAX_TAD_DRAM_RULE];	/* system address generator, per channel */
rir_t rir[MAX_CPU_NODES][CHANNELS_PER_MEMORY_CONTROLLER]
    [MAX_TAD_DRAM_RULE];	/* rank interleave registers */
dod_t dod_reg[MAX_CPU_NODES][CHANNELS_PER_MEMORY_CONTROLLER]
    [MAX_DIMMS_PER_CHANNEL];	/* DIMM organization (geometry) registers */
50
/*
 * Determine whether physical channel `channel' on `node' participates
 * in the interleave of TAD rule `rule'.  Returns 1 if so, with *way_p
 * set to the first interleave way selecting this channel and
 * *no_interleave_p set when every way maps to the same logical channel
 * (i.e. no real channel interleaving); returns 0 otherwise.
 */
static int
channel_in_interleave(int node, int channel, int rule, int *way_p,
    int *no_interleave_p)
{
	int way;
	int c;
	int i;
	uint32_t mc_channel_mapper;
	int lc;
	int rt = 0;
	int start = 0;

	/*
	 * In lockstep or mirror mode channels 0 and 1 operate as a
	 * pair; only those two can be part of an interleave.
	 */
	if (lockstep[node] || mirror_mode[node]) {
		*no_interleave_p = 0;
		if (channel > 1)
			return (0);
		else
			return (1);
	}
	/*
	 * Translate the physical channel to its logical channel via the
	 * channel mapper: search the read mappings first, then writes.
	 */
	mc_channel_mapper = MC_CHANNEL_MAPPER_RD(node);
	lc = -1;
	c = 1 << channel;
	for (i = 0; i < CHANNELS_PER_MEMORY_CONTROLLER; i++) {
		if ((CHANNEL_MAP(mc_channel_mapper, i, 0) & c) != 0) {
			lc = i;
			break;
		}
	}
	if (lc == -1) {
		for (i = 0; i < CHANNELS_PER_MEMORY_CONTROLLER; i++) {
			if ((CHANNEL_MAP(mc_channel_mapper, i, 1) & c) != 0) {
				lc = i;
				break;
			}
		}
	}
	if (lc == -1) {
		return (0);
	}
	*way_p = 0;
	*no_interleave_p = 0;
	/* mod-3 (mode 2) rules on node 1 use ways 4 and up */
	if (node && tad[node][rule].mode == 2)
		start = 4;
	for (way = start; way < INTERLEAVE_NWAY; way++) {
		if (lc == TAD_INTERLEAVE(tad[node][rule].pkg_list, way)) {
			*way_p = way;
			if (way == 0) {
				/*
				 * Check whether every remaining way maps
				 * to this same logical channel.
				 */
				for (i = way + 1; i < INTERLEAVE_NWAY; i++) {
					c = TAD_INTERLEAVE(
					    tad[node][rule].pkg_list, i);
					if (lc != c) {
						break;
					}
				}
				if (i == INTERLEAVE_NWAY)
					*no_interleave_p = 1;
			}
			rt = 1;
			break;
		}
	}
	return (rt);
}
114
115 int
address_to_node(uint64_t addr,int * interleave_p)116 address_to_node(uint64_t addr, int *interleave_p)
117 {
118 int i;
119 int node = -1;
120 uint64_t base;
121 int way;
122 uchar_t package;
123
124 base = 0;
125 for (i = 0; i < MAX_SAD_DRAM_RULE; i++) {
126 if (sad[i].enable && addr >= base && addr < sad[i].limit) {
127 switch (sad[i].mode) {
128 case 0:
129 way = (addr >> 6) & 7;
130 break;
131 case 1:
132 way = ((addr >> 6) & 7) ^ ((addr >> 16) & 7);
133 break;
134 case 2:
135 way = ((addr >> 4) & 4) |
136 (((addr >> 6) & 0x3ffffffff) % 3);
137 break;
138 default:
139 return (-1);
140 }
141 package = SAD_INTERLEAVE(sad[i].node_list, way);
142 if (interleave_p)
143 *interleave_p = sad[i].interleave;
144 if (package == 1)
145 node = 0;
146 else if (package == 2)
147 node = 1;
148 else
149 node = -1;
150 break;
151 }
152 base = sad[i].limit;
153 }
154 return (node);
155 }
156
157 static uint64_t
channel_address(int node,int channel,int rule,uint64_t addr)158 channel_address(int node, int channel, int rule, uint64_t addr)
159 {
160 uint64_t caddr;
161
162 if (lockstep[node] || mirror_mode[node])
163 channel = 0;
164 caddr = (((addr >> 16) +
165 (int64_t)sag_ch[node][channel][rule].soffset) << 16) |
166 (addr & 0xffc0);
167 if (sag_ch[node][channel][rule].remove8) {
168 caddr = ((caddr >> 1) & ~0xff) | (caddr & 0xff);
169 }
170 if (sag_ch[node][channel][rule].remove7) {
171 caddr = ((caddr >> 1) & ~0x7f) | (caddr & 0x7f);
172 }
173 if (sag_ch[node][channel][rule].remove6) {
174 caddr = ((caddr >> 1) & ~0x3f) | (caddr & 0x3f);
175 }
176 caddr = caddr & 0x1fffffffff;
177 if (sag_ch[node][channel][rule].divby3) {
178 caddr = ((((caddr >> 6) / 3) << 6) & 0x1fffffffc0) |
179 (caddr & 0x3f);
180 }
181 return (caddr);
182 }
183
/*
 * Map a system address on `node' to its physical channel.  Returns the
 * physical channel number, or -1 if no enabled TAD rule covers the
 * address.  The logical channel, channel address and TAD interleave
 * factor are returned through the optional out parameters.  `write'
 * selects the write side of the channel mapper (and the write leg of
 * a mirrored pair).
 */
int
address_to_channel(int node, uint64_t addr, int write,
    int *log_chan, uint64_t *channel_addrp, int *interleave_p)
{
	int i;
	int channel = -1;
	uint64_t base;
	uint32_t mapper;
	uint32_t lc;
	int way;

	base = 0;
	for (i = 0; i < MAX_TAD_DRAM_RULE; i++) {
		if (tad[node][i].enable && addr >= base &&
		    addr < tad[node][i].limit) {
			/* pick the interleave way per the rule's mode */
			switch (tad[node][i].mode) {
			case 0:
				way = (addr >> 6) & 7;
				break;
			case 1:
				/* XOR mode hashes in address bits 16-18 */
				way = ((addr >> 6) & 7) ^ ((addr >> 16) & 7);
				break;
			case 2:
				/* mod-3 (divby3) mode */
				way = ((addr >> 4) & 4) |
				    (((addr >> 6) & 0x3ffffffff) % 3);
				break;
			default:
				return (-1);
			}
			/* get logical channel number */
			channel = TAD_INTERLEAVE(tad[node][i].pkg_list, way);
			if (log_chan)
				*log_chan = channel;

			if (channel_addrp) {
				*channel_addrp = channel_address(node,
				    channel, i, addr);
			}
			if (interleave_p)
				*interleave_p = tad[node][i].interleave;
			break;
		}
		base = tad[node][i].limit;
	}
	/*
	 * Convert the logical channel to a physical one via the channel
	 * mapper.  Lockstep needs no conversion: both channels operate
	 * together.
	 */
	if (!lockstep[node] && channel != -1) {
		mapper = MC_CHANNEL_MAPPER_RD(node);
		lc = CHANNEL_MAP(mapper, channel, write);
		switch (lc) {
		case 1:
			channel = 0;
			break;
		case 2:
			channel = 1;
			break;
		case 4:
			channel = 2;
			break;
		case 3: /* mirror PCH0 and PCH1 */
			if (!write) {
				/* reads alternate channels by address hash */
				if (((addr >> 24) & 1) ^ ((addr >> 12) & 1) ^
				    ((addr >> 6) & 1))
					channel = 1;
				else
					channel = 0;
			}
			break;
		case 5: /* sparing PCH0 to PCH2 */
			channel = 0;
			break;
		case 6: /* sparing PCH1 to PCH2 */
			channel = 1;
			break;
		}
	}
	return (channel);
}
260
261 int
channels_interleave(uint64_t addr)262 channels_interleave(uint64_t addr)
263 {
264 int node;
265 int sinterleave;
266 int channels, channels1;
267
268 node = address_to_node(addr, &sinterleave);
269 if (sinterleave == 1) {
270 channels = 0;
271 (void) address_to_channel(node, addr, 0, 0, 0, &channels);
272 } else {
273 channels = 0;
274 channels1 = 0;
275 (void) address_to_channel(0, addr, 0, 0, 0, &channels);
276 (void) address_to_channel(1, addr, 0, 0, 0, &channels1);
277 channels += channels1;
278 }
279 return (channels);
280 }
281
/*
 * Translate a channel address to the DIMM it lives on.  Returns the
 * DIMM number (rank >> 2), or -1 if no RIR rule covers the address.
 * *rank_p and *rank_addr_p are always stored (as -1 / -1ULL on
 * failure).
 */
int
channel_addr_to_dimm(int node, int channel, uint64_t caddr, int *rank_p,
    uint64_t *rank_addr_p)
{
	int i;
	uint64_t base;
	uint64_t rank_addr;
	int rank;
	int dimm;
	int way;

	dimm = -1;
	rank = -1;
	base = 0;
	rank_addr = -1ULL;
	for (i = 0; i < MAX_TAD_DRAM_RULE; i++) {
		if (caddr >= base && caddr < rir[node][channel][i].limit) {
			/*
			 * The rank interleave way is taken from address
			 * bits 6-7 in closed-page mode (64-byte
			 * granularity) or bits 12-13 in open-page mode
			 * (4K granularity).  Apply the way's offset,
			 * divide out the interleave, then re-attach the
			 * untouched low-order bits.
			 */
			if (closed_page) {
				way = (caddr >> 6) & 3;
				rank_addr = (((caddr + (int64_t)
				    rir[node][channel][i].way[way].offset *
				    VRANK_SZ) /
				    rir[node][channel][i].interleave) &
				    ~0x3f) + (caddr & 0x3f);
			} else {
				way = (caddr >> 12) & 3;
				rank_addr = (((caddr + (int64_t)
				    rir[node][channel][i].way[way].offset *
				    VRANK_SZ) /
				    rir[node][channel][i].interleave) &
				    ~0xfff) + (caddr & 0xfff);
			}
			rank = rir[node][channel][i].way[way].rank;
			dimm = rank >> 2;
			break;
		}
		base = rir[node][channel][i].limit;
	}
	*rank_p = rank;
	*rank_addr_p = rank_addr;
	return (dimm);
}
324
325 static int
socket_interleave(uint64_t addr,int node,int channel,int rule,int * way_p)326 socket_interleave(uint64_t addr, int node, int channel, int rule,
327 int *way_p)
328 {
329 int i, j;
330 uint64_t base;
331 uchar_t package;
332 uchar_t xp;
333 uchar_t xc;
334 int ot = 0;
335 int mode;
336 int start;
337 int rt = 1;
338 int found = 0;
339
340 if (mirror_mode[node] || lockstep[node])
341 channel = 0;
342 package = node + 1;
343 mode = tad[node][rule].mode;
344 base = 0;
345 for (i = 0; i < MAX_SAD_DRAM_RULE; i++) {
346 if (sad[i].enable && addr >= base && addr < sad[i].limit) {
347 if (mode == 2) {
348 for (j = 0; j < INTERLEAVE_NWAY; j++) {
349 xp = SAD_INTERLEAVE(sad[i].node_list,
350 j);
351 if (package != xp) {
352 ot++;
353 if (found) {
354 rt = 2;
355 break;
356 }
357 } else {
358 found = 1;
359 if (ot) {
360 rt = 2;
361 break;
362 }
363 }
364 }
365 } else {
366 if (mode == 2)
367 start = *way_p;
368 else
369 start = 0;
370 for (j = start; j < INTERLEAVE_NWAY; j++) {
371 xp = SAD_INTERLEAVE(sad[i].node_list,
372 j);
373 if (package != xp) {
374 ot++;
375 if (found) {
376 rt = 2;
377 break;
378 }
379 } else if (!found) {
380 xc = TAD_INTERLEAVE(
381 tad[node][rule].pkg_list,
382 j);
383 if (channel == xc) {
384 *way_p = j;
385 if (ot) {
386 rt = 2;
387 break;
388 }
389 found = 1;
390 }
391 }
392 }
393 }
394 break;
395 }
396 base = sad[i].limit;
397 }
398 return (rt);
399 }
400
/*
 * Translate a dimm/rank and rank-local address back to a system
 * (physical) address.  Returns the system address, or -1ULL if the
 * rank address cannot be mapped.  The optional out parameters report
 * the virtual-rank base address and size, and the socket, channel and
 * rank interleave factors and ways used in the translation.
 *
 * The translation runs in two passes: first the rank interleave (RIR)
 * is reversed to produce a channel address, then the system address
 * generator (SAG) and TAD interleave are reversed to produce the
 * system address.
 */
uint64_t
dimm_to_addr(int node, int channel, int rank, uint64_t rank_addr,
    uint64_t *rank_base_p, uint64_t *rank_sz_p, uint32_t *socket_interleave_p,
    uint32_t *channel_interleave_p, uint32_t *rank_interleave_p,
    uint32_t *socket_way_p, uint32_t *channel_way_p, uint32_t *rank_way_p)
{
	int i;
	int way, xway;
	uint64_t addr;
	uint64_t caddr;
	uint64_t cbaddr;
	uint64_t baddr;
	uint64_t rlimit;
	uint64_t rank_sz;
	uint64_t base;
	int lchannel;
	int bits;
	int no_interleave;
	int sinterleave;
	int cinterleave;
	int rinterleave;
	int found = 0;

	/* lockstep/mirror pairs use channel 0's SAG settings */
	if (lockstep[node] || mirror_mode[node])
		lchannel = 0;
	else
		lchannel = channel;
	addr = -1;
	base = 0;
	/*
	 * Pass 1: reverse the rank interleave to compute the channel
	 * address (caddr) and the channel address of the virtual-rank
	 * base (cbaddr).
	 */
	for (i = 0; i < MAX_TAD_DRAM_RULE && found == 0; i++) {
		for (way = 0; way < MAX_RIR_WAY; way++) {
			if (rir[node][channel][i].way[way].dimm_rank == rank) {
				rlimit = rir[node][channel][i].way[way].rlimit;
				/* rank_addr beyond this rule's slice */
				if (rlimit && rank_addr >= rlimit)
					continue;
				cbaddr = base;
				if (closed_page) {
					caddr = (rank_addr & ~0x3f) *
					    rir[node][channel][i].interleave -
					    (int64_t)rir[node][channel][i].
					    way[way].soffset * VRANK_SZ;
					caddr += way << 6;
					caddr |= rank_addr & 0x3f;
				} else {
					caddr = (rank_addr & ~0xfff) *
					    rir[node][channel][i].interleave -
					    (int64_t)rir[node][channel][i].
					    way[way].soffset * VRANK_SZ;
					caddr += way << 12;
					caddr |= rank_addr & 0xfff;
				}
				if (caddr < rir[node][channel][i].limit) {
					rinterleave =
					    rir[node][channel][i].interleave;
					rank_sz = (rir[node][channel][i].limit -
					    base) / rinterleave;
					found = 1;
					if (rank_interleave_p) {
						*rank_interleave_p =
						    rinterleave;
					}
					if (rank_way_p)
						*rank_way_p = way;
					break;
				}
			}
		}
		base = rir[node][channel][i].limit;
	}
	if (!found)
		return (-1ULL);
	base = 0;
	/*
	 * Pass 2: reverse the SAG transforms and TAD interleave to
	 * compute the system address.
	 */
	for (i = 0; i < MAX_TAD_DRAM_RULE; i++) {
		way = 0;
		if (tad[node][i].enable &&
		    channel_in_interleave(node, channel, i, &way,
		    &no_interleave)) {
			bits = 0;
			addr = caddr;
			baddr = cbaddr;
			/* undo divide-by-3 compression */
			if (sag_ch[node][lchannel][i].divby3) {
				addr = (((addr >> 6) * 3) << 6) +
				    (addr & 0x3f);
				baddr = (((baddr >> 6) * 3) << 6);
			}
			/* re-insert the removed interleave bits 6, 7, 8 */
			if (sag_ch[node][lchannel][i].remove6) {
				bits = 1;
				addr = ((addr & ~0x3f) << 1) | (addr & 0x3f);
				baddr = (baddr & ~0x3f) << 1;
			}
			if (sag_ch[node][lchannel][i].remove7) {
				bits = bits | 2;
				addr = ((addr & ~0x7f) << 1) | (addr & 0x7f);
				baddr = ((baddr & ~0x7f) << 1) | (baddr & 0x40);
			}
			if (sag_ch[node][lchannel][i].remove8) {
				bits = bits | 4;
				addr = ((addr & ~0xff) << 1) | (addr & 0xff);
				baddr = ((baddr & ~0xff) << 1) | (baddr & 0xc0);
			}
			/* undo the channel address offset */
			addr -= (int64_t)sag_ch[node][lchannel][i].soffset <<
			    16;
			baddr -= (int64_t)
			    sag_ch[node][lchannel][i].soffset << 16;
			if (addr < tad[node][i].limit) {
				/*
				 * this is the target address descriptor to use
				 */
				sinterleave = socket_interleave(addr,
				    node, channel, i, &way);
				if (socket_interleave_p) {
					*socket_interleave_p = sinterleave;
				}
				if (socket_way_p)
					*socket_way_p = way;
				if ((no_interleave && sinterleave == 1) ||
				    mirror_mode[node] || lockstep[node]) {
					cinterleave = 1;
				} else {
					cinterleave = channels_interleave(addr);
				}
				if (channel_interleave_p) {
					*channel_interleave_p = cinterleave;
				}
				if (baddr + (rank_sz * rinterleave *
				    cinterleave * sinterleave) >
				    tad[node][i].limit) {
					/*
					 * The system address mapped to this
					 * rank is not contiguous or has
					 * different socket/channel interleave
					 * adjust virtual rank to address where
					 * change or break occurs
					 */
					rank_sz = (tad[node][i].limit - baddr) /
					    (cinterleave * sinterleave *
					    rinterleave);
				}
				if (rank_sz_p) {
					*rank_sz_p = rank_sz;
				}
				if (rank_base_p)
					*rank_base_p = baddr;
				if (channel_way_p)
					*channel_way_p = way;
				if (sinterleave == 1 && no_interleave) {
					break;
				}
				/*
				 * Fold the interleave way back into the
				 * system address per the TAD rule's mode.
				 */
				switch (tad[node][i].mode) {
				case 0:
					addr += way * 0x40;
					break;
				case 1:
					/* XOR mode: undo the bit-16 hash */
					way = (way ^ (addr >> 16)) & bits;
					addr += way * 0x40;
					break;
				case 2:
					/* mod-3 mode: solve for the way */
					if (sinterleave == 1) {
						xway = ((addr >> 4) & 4) |
						    (((addr >> 6) &
						    0x3ffffffff) % 3);
						if (((way - xway) & 3) == 3)
							xway = (way - xway) & 4;
						else
							xway = way - xway;
						switch (xway) {
						case 0:
							way = 0;
							break;
						case 5:
							way = 1;
							break;
						case 2:
							way = 2;
							break;
						case 4:
							way = 3;
							break;
						case 1:
							way = 4;
							break;
						case 6:
							way = 5;
							break;
						}
					} else {
						xway = (way & 3) -
						    (((addr >> 6) &
						    0x3ffffffff) % 3);
						if (xway < 0)
							xway += 3;
						switch (xway) {
						case 0:
							way = 0;
							break;
						case 1:
							way = 1;
							break;
						case 2:
							way = 2;
							break;
						}
					}
					addr += way * 0x40;
					break;
				}
				break;
			} else if (baddr < tad[node][i].limit) {
				/*
				 * the channel address is not contiguous or
				 * socket/channel interleave changes in the
				 * middle of the rank adjust base and size for
				 * virtual rank to where the break occurs
				 */
				sinterleave = socket_interleave(baddr,
				    node, channel, i, &way);
				if ((no_interleave && sinterleave == 1) ||
				    mirror_mode[node] || lockstep[node]) {
					cinterleave = 1;
				} else {
					cinterleave =
					    channels_interleave(baddr);
				}
				rank_sz -= (tad[node][i].limit - baddr) /
				    (cinterleave * sinterleave * rinterleave);
				cbaddr += (tad[node][i].limit - baddr) /
				    (cinterleave * sinterleave);
			}
		}
		base = tad[node][i].limit;
	}
	return (addr);
}
/*ARGSUSED*/
/*
 * cmi_mc_ops "patounum" entry point: translate a physical address to
 * a unum (board/chip/mc/channel/dimm/rank) plus an offset.  Returns
 * CMI_SUCCESS on success, CMIERR_UNKNOWN when the address does not
 * decode to a known DIMM.
 */
static cmi_errno_t
nhm_patounum(void *arg, uint64_t pa, uint8_t valid_hi, uint8_t valid_lo,
    uint32_t synd, int syndtype, mc_unum_t *unump)
{
	int node;
	int channel;
	int dimm;
	int rank;
	int log_chan;
	uint64_t bank, row, column;
	uint64_t caddr, raddr;

	node = address_to_node(pa, 0);
	if (node == -1) {
		return (CMIERR_UNKNOWN);
	}
	/* syndtype selects the read/write side of the channel mapper */
	channel = address_to_channel(node, pa, syndtype, &log_chan, &caddr, 0);
	if (channel == -1) {
		return (CMIERR_UNKNOWN);
	}
	/*
	 * If driver was built with closed tree present then we will have Intel
	 * proprietary functions caddr_to_dimm and rankaddr_to_dimm for finding
	 * dimm/bank/row/column address otherwise we just locate dimm and
	 * offset.  (Taking the address of the weak symbol tests whether it
	 * was linked in.)
	 */
	if (&caddr_to_dimm)
		dimm = caddr_to_dimm(node, log_chan, caddr, &rank, &raddr);
	else
		dimm = channel_addr_to_dimm(node, log_chan, caddr, &rank,
		    &raddr);
	if (dimm == -1) {
		return (CMIERR_UNKNOWN);

	}
	unump->unum_board = 0;
	unump->unum_chip = node;
	unump->unum_mc = 0;
	unump->unum_chan = channel;
	unump->unum_cs = dimm;
	unump->unum_rank = rank;

	if (&rankaddr_to_dimm) {
		/* closed-tree path: encode bank/row/column in the offset */
		if (rankaddr_to_dimm(raddr, node, channel, dimm, 0, &bank, &row,
		    &column) != DDI_SUCCESS) {
			return (CMIERR_UNKNOWN);
		};
		unump->unum_offset = TCODE_OFFSET(rank, bank, row, column);
	} else {
		unump->unum_offset = raddr;
	}

	return (CMI_SUCCESS);
}
689
/*ARGSUSED*/
/*
 * cmi_mc_ops "unumtopa" entry point: translate a unum (or, when unump
 * is NULL, the hc-scheme FMRI nvlist in `nvl') back to a physical
 * address stored through *pap.  Returns CMI_SUCCESS or CMIERR_UNKNOWN.
 */
static cmi_errno_t
nhm_unumtopa(void *arg, mc_unum_t *unump, nvlist_t *nvl, uint64_t *pap)
{
	uint64_t pa;
	cmi_errno_t rt;
	int node;
	int channel;
	int log_chan;
	int rank;
	int i;
	nvlist_t **hcl, *hcsp;
	uint_t npr;
	uint64_t offset;
	char *hcnm, *hcid;
	long v;
	uint64_t row, bank, col;
	int dimm;
	uint64_t rank_addr;

	if (unump == NULL) {
		/*
		 * No unum supplied: pull the offset (or, failing that, a
		 * ready-made physical address) from the FMRI's hc-specific
		 * member, then chip/channel/dimm/rank from the hc-list.
		 */
		if (nvlist_lookup_nvlist(nvl, FM_FMRI_HC_SPECIFIC,
		    &hcsp) != 0)
			return (CMIERR_UNKNOWN);
		if (nvlist_lookup_uint64(hcsp,
		    "asru-" FM_FMRI_HC_SPECIFIC_OFFSET, &offset) != 0 &&
		    nvlist_lookup_uint64(hcsp, FM_FMRI_HC_SPECIFIC_OFFSET,
		    &offset) != 0) {
			if (nvlist_lookup_uint64(hcsp,
			    "asru-" FM_FMRI_HC_SPECIFIC_PHYSADDR, &pa) == 0 ||
			    nvlist_lookup_uint64(hcsp,
			    FM_FMRI_HC_SPECIFIC_PHYSADDR, &pa) == 0) {
				*pap = pa;
				return (CMI_SUCCESS);
			}
			return (CMIERR_UNKNOWN);
		}
		if (nvlist_lookup_nvlist_array(nvl, FM_FMRI_HC_LIST,
		    &hcl, &npr) != 0)
			return (CMIERR_UNKNOWN);
		node = -1;
		channel = -1;
		dimm = -1;
		rank = -1;
		for (i = 0; i < npr; i++) {
			if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
			    &hcnm) != 0 ||
			    nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID,
			    &hcid) != 0 ||
			    ddi_strtol(hcid, NULL, 0, &v) != 0)
				return (CMIERR_UNKNOWN);
			if (strcmp(hcnm, "chip") == 0)
				node = (int)v;
			else if (strcmp(hcnm, "dram-channel") == 0)
				channel = (int)v;
			else if (strcmp(hcnm, "dimm") == 0)
				dimm = (int)v;
			else if (strcmp(hcnm, "rank") == 0)
				rank = (int)v;
		}
		/* all four components are required to translate */
		if (node == -1 || channel == -1 || dimm == -1 || rank == -1)
			return (CMIERR_UNKNOWN);
	} else {
		node = unump->unum_chip;
		channel = unump->unum_chan;
		rank = unump->unum_rank;
		dimm = unump->unum_cs;
		offset = unump->unum_offset;
	}

	/*
	 * If driver was built with closed tree present then we will have Intel
	 * proprietary functions dimm_to_rankaddr for finding
	 * physical address.
	 */
	if (&dimm_to_rankaddr && (offset & OFFSET_ROW_BANK_COL) != 0) {
		/* offset encodes row/bank/column: decode then translate */
		row = TCODE_OFFSET_RAS(offset);
		bank = TCODE_OFFSET_BANK(offset);
		col = TCODE_OFFSET_CAS(offset);
		rank_addr = dimm_to_rankaddr(node, channel, dimm, row,
		    bank, col, &log_chan);
		pa = rankaddr_to_phyaddr(node, log_chan, dimm, rank,
		    rank_addr);
	} else if ((offset & OFFSET_ROW_BANK_COL) == 0) {
		/* plain rank offset: use the open translation path */
		pa = dimm_to_addr(node, channel, rank, offset, 0, 0, 0, 0, 0,
		    0, 0, 0);
	} else {
		pa = -1LL;
	}

	if (pa == -1) {
		rt = CMIERR_UNKNOWN;
	} else {
		rt = CMI_SUCCESS;
		*pap = pa;
	}
	return (rt);
}
788
/*
 * Memory-controller ops vector registered with the CMI framework.
 */
static const cmi_mc_ops_t nhm_mc_ops = {
	nhm_patounum,	/* cmi_mc_patounum */
	nhm_unumtopa,	/* cmi_mc_unumtopa */
	nhm_error_trap	/* cmi_mc_logout */
};
794
/*ARGSUSED*/
/*
 * cmi_hdl_walk() callback: register the Nehalem memory-controller ops
 * vector for each CPU handle visited.  Always continues the walk.
 */
int
inhm_mc_register(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
{
	cmi_mc_register(hdl, &nhm_mc_ops, NULL);
	return (CMI_HDL_WALK_NEXT);
}
802
803 static int
choose_cpu(int * lastslot_p)804 choose_cpu(int *lastslot_p)
805 {
806 uint32_t id;
807 int first;
808 int last;
809
810 first = 0;
811 last = MAX_CPU_NODES;
812 id = CPU_ID_RD(0);
813 if (id == NHM_EP_CPU || id == NHM_WS_CPU || id == NHM_JF_CPU ||
814 id == NHM_WM_CPU) {
815 id = CPU_ID_RD(1);
816 if (id != NHM_EP_CPU && id != NHM_WS_CPU && id != NHM_JF_CPU &&
817 id != NHM_WM_CPU) {
818 last = 1;
819 }
820 } else {
821 first = 1;
822 }
823 *lastslot_p = last;
824 return (first);
825 }
826
827 static int
sad_interleave(uint32_t list)828 sad_interleave(uint32_t list)
829 {
830 int rt = 1;
831 int i, j;
832 int p;
833
834 for (i = 1; i < INTERLEAVE_NWAY; i++) {
835 p = SAD_INTERLEAVE(list, i);
836 for (j = 0; j < i; j++) {
837 if (p == SAD_INTERLEAVE(list, j))
838 break;
839 }
840 if (i == j)
841 rt++;
842 }
843 return (rt);
844 }
845
846 static int
tad_interleave(uint32_t list)847 tad_interleave(uint32_t list)
848 {
849 int rt = 1;
850 int i, j;
851 int c;
852
853 for (i = 1; i < INTERLEAVE_NWAY; i++) {
854 c = TAD_INTERLEAVE(list, i);
855 for (j = 0; j < i; j++) {
856 if (c == TAD_INTERLEAVE(list, j))
857 break;
858 }
859 if (i == j)
860 rt++;
861 }
862 return (rt);
863 }
864
/*
 * Record the limit (rlimit) of the virtual ranks mapped onto the given
 * physical rank, so that earlier RIR rules/ways sourcing the same rank
 * are bounded where this rule's mapping of the rank begins.  A
 * rank_addr of 0 means there is nothing to bound.
 */
static void
set_rank(int socket, int channel, int rule, int way, int rank,
    uint64_t rank_addr)
{
	int k, l;
	if (rank_addr == 0)
		return;
	/*
	 * set limit on any rules which have virtual rank in current rank and
	 * are not already limited by earlier rule
	 */
	for (k = 0; k < rule; k++) {
		for (l = 0; l < MAX_RIR_WAY; l++) {
			if (rir[socket][channel][k].way[l].dimm_rank == rank &&
			    rir[socket][channel][k].way[l].rlimit == 0) {
				rir[socket][channel][k].way[l].rlimit =
				    rank_addr;
			}
		}
	}
	/*
	 * set limit if this rule supplies more than 1 virtual rank from current
	 * rank.  Note that the loop above leaves k == rule, so this walks
	 * the earlier ways of the current rule itself.
	 */
	for (l = 0; l < way; l++) {
		if (rir[socket][channel][k].way[l].dimm_rank == rank &&
		    rir[socket][channel][k].way[l].rlimit == 0) {
			rir[socket][channel][k].way[l].rlimit = rank_addr;
		}
	}
}
896
/*
 * Read and cache the memory-controller configuration — SAD, TAD, SAG,
 * RIR and DOD registers plus RAS/control state — for all populated
 * Nehalem CPU slots.  Called once at driver initialization; fills in
 * the file-scope tables used by the address translation routines.
 */
void
mem_reg_init()
{
	int i, j, k, l, m;
	uint32_t sad_dram_rule;
	uint32_t tad_dram_rule;
	uint32_t mc_ras_enables;
	uint32_t mc_channel_mapping;
	uint32_t sagch;
	uint32_t rir_limit;
	uint32_t rir_way;
	uint32_t mc_control;
	uint32_t id;
	int nhm_slot;
	int nhm_lastslot;
	uint8_t rank;
	uint64_t base;
	int ras_dev = 0;
	uint32_t dod_value;

	nhm_slot = choose_cpu(&nhm_lastslot);

	/* source address decoder (socket-level) rules */
	for (i = 0; i < MAX_SAD_DRAM_RULE; i++) {
		sad_dram_rule = SAD_DRAM_RULE_RD(nhm_slot, i);
		sad[i].enable = SAD_DRAM_RULE_ENABLE(sad_dram_rule);
		sad[i].limit = SAD_DRAM_LIMIT(sad_dram_rule);
		sad[i].mode = SAD_DRAM_MODE(sad_dram_rule);
		sad[i].node_list = SAD_INTERLEAVE_LIST_RD(nhm_slot, i);
		sad[i].interleave = sad_interleave(sad[i].node_list);
		for (j = 0; j < INTERLEAVE_NWAY; j++) {
			sad[i].node_tgt[j] = (sad[i].node_list >>
			    (j * 4)) & 0x3;
		}
	}

	for (i = nhm_slot; i < nhm_lastslot; i++) {
		/* RAS capabilities: lockstep and mirroring */
		id = MC_CPU_RAS_RD(i);
		if (id == NHM_CPU_RAS || id == NHM_JF_CPU_RAS ||
		    id == NHM_WM_CPU_RAS) {
			ras_dev = 1;
			mc_ras_enables = MC_RAS_ENABLES_RD(i);
			if (RAS_LOCKSTEP_ENABLE(mc_ras_enables))
				lockstep[i] = 1;
			if (RAS_MIRROR_MEM_ENABLE(mc_ras_enables))
				mirror_mode[i] = 1;
		}
		/* channel 2 unmapped for both reads and writes => spare */
		mc_channel_mapping = MC_CHANNEL_MAPPER_RD(i);
		if (CHANNEL_MAP(mc_channel_mapping, 2, 0) == 0 &&
		    CHANNEL_MAP(mc_channel_mapping, 2, 1) == 0)
			spare_channel[i] = 1;
		/* target address decoder (node-level) rules */
		for (j = 0; j < MAX_TAD_DRAM_RULE; j++) {
			tad_dram_rule = TAD_DRAM_RULE_RD(i, j);
			tad[i][j].enable = TAD_DRAM_RULE_ENABLE(tad_dram_rule);
			tad[i][j].limit = TAD_DRAM_LIMIT(tad_dram_rule);
			tad[i][j].mode = TAD_DRAM_MODE(tad_dram_rule);
			tad[i][j].pkg_list =
			    TAD_INTERLEAVE_LIST_RD(i, j);
			for (k = 0; k < INTERLEAVE_NWAY; k++) {
				tad[i][j].pkg_tgt[k] = ((tad[i][j].pkg_list >>
				    (k * 4)) & 0x3);
			}
			if (mirror_mode[i] || lockstep[i]) {
				tad[i][j].interleave = 1;
			} else {
				tad[i][j].interleave =
				    tad_interleave(tad[i][j].pkg_list);
				/* the spare channel does not interleave */
				if (spare_channel[i] &&
				    tad[i][j].interleave ==
				    CHANNELS_PER_MEMORY_CONTROLLER)
					tad[i][j].interleave--;
			}
		}
		/* per-channel SAG, RIR and DOD registers */
		for (j = 0; j < CHANNELS_PER_MEMORY_CONTROLLER; j++) {
			m = 0;
			base = 0;
			for (k = 0; k < MAX_TAD_DRAM_RULE; k++) {
				sagch = MC_SAG_RD(i, j, k);
				sag_ch[i][j][k].offset =
				    CH_ADDRESS_OFFSET(sagch);
				sag_ch[i][j][k].soffset =
				    CH_ADDRESS_SOFFSET(sagch);
				sag_ch[i][j][k].divby3 = DIVBY3(sagch);
				sag_ch[i][j][k].remove6 = REMOVE_6(sagch);
				sag_ch[i][j][k].remove7 = REMOVE_7(sagch);
				sag_ch[i][j][k].remove8 = REMOVE_8(sagch);

				rir_limit = MC_RIR_LIMIT_RD(i, j, k);
				rir[i][j][k].limit = RIR_LIMIT(rir_limit);
				for (l = 0; l < MAX_RIR_WAY; l++) {
					rir_way = MC_RIR_WAY_RD(i, j, m);
					rir[i][j][k].way[l].offset =
					    RIR_OFFSET(rir_way);
					rir[i][j][k].way[l].soffset =
					    RIR_SOFFSET(rir_way);
					rir[i][j][k].way[l].rank =
					    RIR_RANK(rir_way);
					rir[i][j][k].way[l].dimm =
					    RIR_DIMM(rir_way);
					rir[i][j][k].way[l].dimm_rank =
					    RIR_DIMM_RANK(rir_way);
					rir[i][j][k].way[l].rlimit = 0;
					m++;
				}
				/*
				 * Derive the rank interleave factor: 1 if
				 * all four ways reference the same rank,
				 * 2 if any other way matches way 0,
				 * otherwise 4.
				 */
				rank = rir[i][j][k].way[0].dimm_rank;
				if (rank == rir[i][j][k].way[1].dimm_rank &&
				    rank == rir[i][j][k].way[2].dimm_rank &&
				    rank == rir[i][j][k].way[3].dimm_rank) {
					rir[i][j][k].interleave = 1;
				} else if
				    (rank == rir[i][j][k].way[1].dimm_rank ||
				    rank == rir[i][j][k].way[2].dimm_rank ||
				    rank == rir[i][j][k].way[3].dimm_rank) {
					rir[i][j][k].interleave = 2;
				} else {
					rir[i][j][k].interleave = 4;
				}
				/* bound earlier rules' virtual ranks */
				for (l = 0; l < MAX_RIR_WAY; l++) {
					set_rank(i, j, k, l,
					    rir[i][j][k].way[l].dimm_rank,
					    ((rir[i][j][k].way[l].soffset +
					    base) /
					    rir[i][j][k].interleave));
				}
				base = rir[i][j][k].limit;
			}
			/* DIMM organization (geometry) registers */
			for (k = 0; k < MAX_DIMMS_PER_CHANNEL; k++) {
				dod_value = MC_DOD_RD(i, j, k);
				dod_reg[i][j][k].NUMCol = NUMCOL(dod_value);
				dod_reg[i][j][k].NUMRow = NUMROW(dod_value);
				dod_reg[i][j][k].NUMBank = NUMBANK(dod_value);
				dod_reg[i][j][k].NUMRank = NUMRANK(dod_value);
				dod_reg[i][j][k].DIMMPresent =
				    DIMMPRESENT(dod_value);
				dod_reg[i][j][k].RankOffset =
				    RANKOFFSET(dod_value);
			}
		}
	}
	/* global MC control/status: page policy, ECC, divide-by-3 */
	mc_control = MC_CONTROL_RD(nhm_slot);
	closed_page = MC_CONTROL_CLOSED_PAGE(mc_control);
	if (ras_dev)
		ecc_enabled = MC_CONTROL_ECCEN(mc_control);
	else if ((MC_STATUS_RD(nhm_slot) & WS_ECC_ENABLED) != 0)
		ecc_enabled = 1;
	divby3_enabled = MC_CONTROL_DIVBY3(mc_control);
}
1043