xref: /illumos-gate/usr/src/uts/intel/io/intel_nb5000/nb5000_init.c (revision 35366b936dd27e7a780ce1c1fccdf6e3c3defe69)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include <sys/pci.h>
44 #include <sys/pcie.h>
45 #include "nb5000.h"
46 #include "nb_log.h"
47 #include "dimm_phys.h"
48 #include "rank.h"
49 
50 int nb_hw_memory_scrub_enable = 1;
51 static int nb_sw_scrub_disabled = 0;
52 
53 int nb_5000_memory_controller = 0;
54 int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
55 int nb_channels_per_branch = NB_MAX_CHANNELS_PER_BRANCH;
56 int nb_dimms_per_channel = 0;
57 
58 nb_dimm_t **nb_dimms;
59 int nb_ndimm;
60 uint32_t nb_chipset;
61 enum nb_memory_mode nb_mode;
62 bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
63 rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
64 uint32_t top_of_low_memory;
65 uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];
66 
67 extern int nb_no_smbios;
68 
69 errorq_t *nb_queue;
70 kmutex_t nb_mutex;
71 
72 static int nb_dimm_slots;
73 
74 static uint32_t nb_err0_int;
75 static uint32_t nb_err1_int;
76 static uint32_t nb_err2_int;
77 static uint32_t nb_mcerr_int;
78 static uint32_t nb_emask_int;
79 
80 static uint32_t nb_err0_fbd;
81 static uint32_t nb_err1_fbd;
82 static uint32_t nb_err2_fbd;
83 static uint32_t nb_mcerr_fbd;
84 static uint32_t nb_emask_fbd;
85 
86 static uint32_t nb_err0_mem;
87 static uint32_t nb_err1_mem;
88 static uint32_t nb_err2_mem;
89 static uint32_t nb_mcerr_mem;
90 static uint32_t nb_emask_mem;
91 
92 static uint16_t nb_err0_fsb;
93 static uint16_t nb_err1_fsb;
94 static uint16_t nb_err2_fsb;
95 static uint16_t nb_mcerr_fsb;
96 static uint16_t nb_emask_fsb;
97 
98 static uint16_t nb_err0_thr;
99 static uint16_t nb_err1_thr;
100 static uint16_t nb_err2_thr;
101 static uint16_t nb_mcerr_thr;
102 static uint16_t nb_emask_thr;
103 
104 static uint32_t	emask_uncor_pex[NB_PCI_DEV];
105 static uint32_t emask_cor_pex[NB_PCI_DEV];
106 static uint32_t emask_rp_pex[NB_PCI_DEV];
107 static uint32_t docmd_pex[NB_PCI_DEV];
108 static uint32_t uncerrsev[NB_PCI_DEV];
109 
110 static uint32_t l_mcerr_int;
111 static uint32_t l_mcerr_fbd;
112 static uint32_t l_mcerr_mem;
113 static uint16_t l_mcerr_fsb;
114 static uint16_t l_mcerr_thr;
115 
116 uint_t nb5000_emask_fbd = EMASK_5000_FBD_RES;
117 uint_t nb5400_emask_fbd = 0;
118 int nb5000_reset_emask_fbd = 1;
119 uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
120 uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;
121 uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
122 uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;
123 
124 int nb5100_reset_emask_mem = 1;
125 uint_t nb5100_mask_poll_mem = EMASK_MEM_NF;
126 
127 uint_t nb5000_emask_fsb = 0;
128 int nb5000_reset_emask_fsb = 1;
129 uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
130 uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;
131 
132 uint_t nb5400_emask_int = EMASK_INT_5400;
133 
134 uint_t nb7300_emask_int = EMASK_INT_7300;
135 uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
136 uint_t nb5000_emask_int = EMASK_INT_5000;
137 int nb5000_reset_emask_int = 1;
138 uint_t nb5000_mask_poll_int = EMASK_INT_NF;
139 uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;
140 
141 uint_t nb_mask_poll_thr = EMASK_THR_NF;
142 uint_t nb_mask_bios_thr = EMASK_THR_FATAL;
143 
144 int nb5000_reset_uncor_pex = 0;
145 uint_t nb5000_mask_uncor_pex = 0;
146 int nb5000_reset_cor_pex = 0;
147 uint_t nb5000_mask_cor_pex = 0xffffffff;
148 uint32_t nb5000_rp_pex = 0x1;
149 
150 int nb_mask_mc_set;
151 
152 typedef struct find_dimm_label {
153 	void (*label_function)(int, char *, int);
154 } find_dimm_label_t;
155 
156 static void x8450_dimm_label(int, char *, int);
157 static void cp3250_dimm_label(int, char *, int);
158 
159 static struct platform_label {
160 	const char *sys_vendor;		/* SMB_TYPE_SYSTEM vendor prefix */
161 	const char *sys_product;	/* SMB_TYPE_SYSTEM product prefix */
162 	find_dimm_label_t dimm_label;
163 	int dimms_per_channel;
164 } platform_label[] = {
165 	{ "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
166 	    x8450_dimm_label, 8 },
167 	{ "MiTAC,Shunde", "CP3250", cp3250_dimm_label, 0 },
168 	{ NULL, NULL, NULL, 0 }
169 };
170 
/*
 * Read the SPD status/data register for the given SMBus; the bus
 * number encodes the branch (upper bits) and channel (low bit).
 */
static unsigned short
read_spd(int bus)
{
	int branch = bus >> 1;
	int channel = bus & 1;

	return (SPD_RD(branch, channel));
}
182 
183 static void
184 write_spdcmd(int bus, uint32_t val)
185 {
186 	int branch = bus >> 1;
187 	int channel = bus & 1;
188 	SPDCMD_WR(branch, channel, val);
189 }
190 
191 static int
192 read_spd_eeprom(int bus, int slave, int addr)
193 {
194 	int retry = 4;
195 	int wait;
196 	int spd;
197 	uint32_t cmd;
198 
199 	for (;;) {
200 		wait = 1000;
201 		for (;;) {
202 			spd = read_spd(bus);
203 			if ((spd & SPD_BUSY) == 0)
204 				break;
205 			if (--wait == 0)
206 				return (-1);
207 			drv_usecwait(10);
208 		}
209 		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
210 		write_spdcmd(bus, cmd);
211 		wait = 1000;
212 		for (;;) {
213 			spd = read_spd(bus);
214 			if ((spd & SPD_BUSY) == 0)
215 				break;
216 			if (--wait == 0) {
217 				spd = SPD_BUS_ERROR;
218 				break;
219 			}
220 			drv_usecwait(10);
221 		}
222 		while ((spd & SPD_BUS_ERROR) == 0 &&
223 		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
224 		    SPD_READ_DATA_VALID) {
225 			spd = read_spd(bus);
226 			if (--wait == 0)
227 				return (-1);
228 		}
229 		if ((spd & SPD_BUS_ERROR) == 0)
230 			break;
231 		if (--retry == 0)
232 			return (-1);
233 	}
234 	return (spd & 0xff);
235 }
236 
237 static void
238 nb_fini()
239 {
240 	int i, j;
241 	int nchannels = nb_number_memory_controllers * nb_channels_per_branch;
242 	nb_dimm_t **dimmpp;
243 	nb_dimm_t *dimmp;
244 
245 	dimmpp = nb_dimms;
246 	for (i = 0; i < nchannels; i++) {
247 		for (j = 0; j < nb_dimms_per_channel; j++) {
248 			dimmp = *dimmpp;
249 			if (dimmp) {
250 				kmem_free(dimmp, sizeof (nb_dimm_t));
251 				*dimmpp = NULL;
252 			}
253 			dimmp++;
254 		}
255 	}
256 	kmem_free(nb_dimms, sizeof (nb_dimm_t *) * nb_dimm_slots);
257 	nb_dimms = NULL;
258 	dimm_fini();
259 }
260 
261 void
262 nb_scrubber_enable()
263 {
264 	uint32_t mc;
265 
266 	if (!nb_hw_memory_scrub_enable)
267 		return;
268 
269 	mc = MC_RD();
270 	if ((mc & MC_MIRROR) != 0) /* mirror mode */
271 		mc |= MC_PATROL_SCRUB;
272 	else
273 		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
274 	MC_WR(mc);
275 
276 	if (nb_sw_scrub_disabled++)
277 		cmi_mc_sw_memscrub_disable();
278 }
279 
280 static void
281 fbd_eeprom(int channel, int dimm, nb_dimm_t *dp)
282 {
283 	int i, t;
284 	int spd_sz;
285 
286 	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
287 	if (t == 1)
288 		spd_sz = 128;
289 	else if (t == 2)
290 		spd_sz = 176;
291 	else
292 		spd_sz = 256;
293 	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
294 	    (read_spd_eeprom(channel, dimm, 118) << 8);
295 	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
296 	dp->serial_number =
297 	    (read_spd_eeprom(channel, dimm, 122) << 24) |
298 	    (read_spd_eeprom(channel, dimm, 123) << 16) |
299 	    (read_spd_eeprom(channel, dimm, 124) << 8) |
300 	    read_spd_eeprom(channel, dimm, 125);
301 	t = read_spd_eeprom(channel, dimm, 121);
302 	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
303 	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
304 	if (spd_sz > 128) {
305 		for (i = 0; i < sizeof (dp->part_number); i++) {
306 			dp->part_number[i] =
307 			    read_spd_eeprom(channel, dimm, 128 + i);
308 		}
309 		for (i = 0; i < sizeof (dp->revision); i++) {
310 			dp->revision[i] =
311 			    read_spd_eeprom(channel, dimm, 146 + i);
312 		}
313 	}
314 }
315 
316 /* read the manR of the DDR2 dimm */
317 static void
318 ddr2_eeprom(int channel, int dimm, nb_dimm_t *dp)
319 {
320 	int i, t;
321 	int slave;
322 
323 	slave = channel & 0x1 ? dimm + 4 : dimm;
324 
325 	/* byte[3]: number of row addresses */
326 	dp->nrow = read_spd_eeprom(channel, slave, 3) & 0x1f;
327 
328 	/* byte[4]: number of column addresses */
329 	dp->ncolumn = read_spd_eeprom(channel, slave, 4) & 0xf;
330 
331 	/* byte[5]: numranks; 0 means one rank */
332 	dp->nranks = (read_spd_eeprom(channel, slave, 5) & 0x3) + 1;
333 
334 	/* byte[6]: data width */
335 	dp->width = (read_spd_eeprom(channel, slave, 6) >> 5) << 2;
336 
337 	/* byte[17]: number of banks */
338 	dp->nbanks = read_spd_eeprom(channel, slave, 17);
339 
340 	dp->dimm_size = DIMMSIZE(dp->nrow, dp->ncolumn, dp->nranks, dp->nbanks,
341 	    dp->width);
342 
343 	/* manufacture-id - byte[64-65] */
344 	dp->manufacture_id = read_spd_eeprom(channel, slave, 64) |
345 	    (read_spd_eeprom(channel, dimm, 65) << 8);
346 
347 	/* location - byte[72] */
348 	dp->manufacture_location = read_spd_eeprom(channel, slave, 72);
349 
350 	/* serial number - byte[95-98] */
351 	dp->serial_number =
352 	    (read_spd_eeprom(channel, slave, 98) << 24) |
353 	    (read_spd_eeprom(channel, slave, 97) << 16) |
354 	    (read_spd_eeprom(channel, slave, 96) << 8) |
355 	    read_spd_eeprom(channel, slave, 95);
356 
357 	/* week - byte[94] */
358 	t = read_spd_eeprom(channel, slave, 94);
359 	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
360 	/* week - byte[93] */
361 	t = read_spd_eeprom(channel, slave, 93);
362 	dp->manufacture_year = (t >> 4) * 10 + (t & 0xf) + 2000;
363 
364 	/* part number - byte[73-81] */
365 	for (i = 0; i < 8; i++) {
366 		dp->part_number[i] = read_spd_eeprom(channel, slave, 73 + i);
367 	}
368 
369 	/* revision - byte[91-92] */
370 	for (i = 0; i < 2; i++) {
371 		dp->revision[i] = read_spd_eeprom(channel, slave, 91 + i);
372 	}
373 }
374 
375 static boolean_t
376 nb_dimm_present(int channel, int dimm)
377 {
378 	boolean_t rc = B_FALSE;
379 
380 	if (nb_chipset == INTEL_NB_5100) {
381 		int t, slave;
382 		slave = channel & 0x1 ? dimm + 4 : dimm;
383 		/* read the type field from the dimm and check for DDR2 type */
384 		if ((t = read_spd_eeprom(channel, slave, SPD_MEM_TYPE)) == -1)
385 			return (B_FALSE);
386 		rc = (t & 0xf) == SPD_DDR2;
387 	} else {
388 		rc = MTR_PRESENT(MTR_RD(channel, dimm)) != 0;
389 	}
390 
391 	return (rc);
392 }
393 
394 static nb_dimm_t *
395 nb_ddr2_dimm_init(int channel, int dimm, int start_rank)
396 {
397 	nb_dimm_t *dp;
398 
399 	if (nb_dimm_present(channel, dimm) == B_FALSE)
400 		return (NULL);
401 
402 	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
403 
404 	ddr2_eeprom(channel, dimm, dp);
405 
406 	/* The 1st rank of the dimm takes on this value */
407 	dp->start_rank = (uint8_t)start_rank;
408 
409 	dp->mtr_present = 1;
410 
411 	return (dp);
412 }
413 
414 static nb_dimm_t *
415 nb_fbd_dimm_init(int channel, int dimm, uint16_t mtr)
416 {
417 	nb_dimm_t *dp;
418 	int t;
419 
420 	if (MTR_PRESENT(mtr) == 0)
421 		return (NULL);
422 	t = read_spd_eeprom(channel, dimm, SPD_MEM_TYPE) & 0xf;
423 
424 	/* check for the dimm type */
425 	if (t != SPD_FBDIMM)
426 		return (NULL);
427 
428 	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
429 
430 	fbd_eeprom(channel, dimm, dp);
431 
432 	dp->mtr_present = MTR_PRESENT(mtr);
433 	dp->start_rank = dimm << 1;
434 	dp->nranks = MTR_NUMRANK(mtr);
435 	dp->nbanks = MTR_NUMBANK(mtr);
436 	dp->ncolumn = MTR_NUMCOL(mtr);
437 	dp->nrow = MTR_NUMROW(mtr);
438 	dp->width = MTR_WIDTH(mtr);
439 	dp->dimm_size = MTR_DIMMSIZE(mtr);
440 
441 	return (dp);
442 }
443 
444 static uint64_t
445 mc_range(int controller, uint64_t base)
446 {
447 	int i;
448 	uint64_t limit = 0;
449 
450 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
451 		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
452 		    base < nb_banks[i].limit) {
453 			limit = nb_banks[i].limit;
454 			if (base <= top_of_low_memory &&
455 			    limit > top_of_low_memory) {
456 				limit -= TLOW_MAX - top_of_low_memory;
457 			}
458 			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
459 			    nb_mode != NB_MEMORY_MIRROR) {
460 				limit = limit / 2;
461 			}
462 		}
463 	}
464 	return (limit);
465 }
466 
467 void
468 nb_mc_init()
469 {
470 	uint16_t tolm;
471 	uint16_t mir;
472 	uint32_t hole_base;
473 	uint32_t hole_size;
474 	uint32_t dmir;
475 	uint64_t base;
476 	uint64_t limit;
477 	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
478 	int i, j, k;
479 	uint8_t interleave;
480 
481 	base = 0;
482 	tolm = TOLM_RD();
483 	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
484 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
485 		mir = MIR_RD(i);
486 		limit = (uint64_t)(mir >> 4) << 28;
487 		way0 = mir & 1;
488 		way1 = (mir >> 1) & 1;
489 		if (way0 == 0 && way1 == 0) {
490 			way0 = 1;
491 			way1 = 1;
492 		}
493 		if (limit > top_of_low_memory)
494 			limit += TLOW_MAX - top_of_low_memory;
495 		nb_banks[i].base = base;
496 		nb_banks[i].limit = limit;
497 		nb_banks[i].way[0] = way0;
498 		nb_banks[i].way[1] = way1;
499 		base = limit;
500 	}
501 	for (i = 0; i < nb_number_memory_controllers; i++) {
502 		base = 0;
503 
504 		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
505 			dmir = DMIR_RD(i, j);
506 			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
507 			if (limit == 0) {
508 				limit = mc_range(i, base);
509 			}
510 			branch_interleave = 0;
511 			hole_base = 0;
512 			hole_size = 0;
513 			DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
514 			if (rank0 == rank1)
515 				interleave = 1;
516 			else if (rank0 == rank2)
517 				interleave = 2;
518 			else
519 				interleave = 4;
520 			if (nb_mode != NB_MEMORY_MIRROR &&
521 			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
522 				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
523 					if (base >= nb_banks[k].base &&
524 					    base < nb_banks[k].limit) {
525 						if (nb_banks[i].way[0] &&
526 						    nb_banks[i].way[1]) {
527 							interleave *= 2;
528 							limit *= 2;
529 							branch_interleave = 1;
530 						}
531 						break;
532 					}
533 				}
534 			}
535 			if (base < top_of_low_memory &&
536 			    limit > top_of_low_memory) {
537 				hole_base = top_of_low_memory;
538 				hole_size = TLOW_MAX - top_of_low_memory;
539 				limit += hole_size;
540 			} else if (base > top_of_low_memory) {
541 				limit += TLOW_MAX - top_of_low_memory;
542 			}
543 			nb_ranks[i][j].base = base;
544 			nb_ranks[i][j].limit = limit;
545 			nb_ranks[i][j].rank[0] = rank0;
546 			nb_ranks[i][j].rank[1] = rank1;
547 			nb_ranks[i][j].rank[2] = rank2;
548 			nb_ranks[i][j].rank[3] = rank3;
549 			nb_ranks[i][j].interleave = interleave;
550 			nb_ranks[i][j].branch_interleave = branch_interleave;
551 			nb_ranks[i][j].hole_base = hole_base;
552 			nb_ranks[i][j].hole_size = hole_size;
553 			if (limit > base) {
554 				if (rank0 != rank1) {
555 					dimm_add_rank(i, rank1,
556 					    branch_interleave, 1, base,
557 					    hole_base, hole_size, interleave,
558 					    limit);
559 					if (rank0 != rank2) {
560 						dimm_add_rank(i, rank2,
561 						    branch_interleave, 2, base,
562 						    hole_base, hole_size,
563 						    interleave, limit);
564 						dimm_add_rank(i, rank3,
565 						    branch_interleave, 3, base,
566 						    hole_base, hole_size,
567 						    interleave, limit);
568 					}
569 				}
570 			}
571 			base = limit;
572 		}
573 	}
574 }
575 
576 void
577 nb_used_spare_rank(int branch, int bad_rank)
578 {
579 	int i;
580 	int j;
581 
582 	for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
583 		for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
584 			if (nb_ranks[branch][i].rank[j] == bad_rank) {
585 				nb_ranks[branch][i].rank[j] =
586 				    spare_rank[branch];
587 				i = NB_MEM_RANK_SELECT;
588 				break;
589 			}
590 		}
591 	}
592 }
593 
594 find_dimm_label_t *
595 find_dimms_per_channel()
596 {
597 	struct platform_label *pl;
598 	smbios_info_t si;
599 	smbios_system_t sy;
600 	id_t id;
601 	int i, j;
602 	find_dimm_label_t *rt = NULL;
603 
604 	if (ksmbios != NULL && nb_no_smbios == 0) {
605 		if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
606 		    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
607 			for (pl = platform_label; pl->sys_vendor; pl++) {
608 				if (strncmp(pl->sys_vendor,
609 				    si.smbi_manufacturer,
610 				    strlen(pl->sys_vendor)) == 0 &&
611 				    strncmp(pl->sys_product, si.smbi_product,
612 				    strlen(pl->sys_product)) == 0) {
613 					nb_dimms_per_channel =
614 					    pl->dimms_per_channel;
615 					rt = &pl->dimm_label;
616 					break;
617 				}
618 			}
619 		}
620 	}
621 	if (nb_dimms_per_channel == 0) {
622 		/*
623 		 * Scan all memory channels if we find a channel which has more
624 		 * dimms then we have seen before set nb_dimms_per_channel to
625 		 * the number of dimms on the channel
626 		 */
627 		for (i = 0; i < nb_number_memory_controllers; i++) {
628 			for (j = nb_dimms_per_channel;
629 			    j < NB_MAX_DIMMS_PER_CHANNEL; j++) {
630 				if (nb_dimm_present(i, j))
631 					nb_dimms_per_channel = j + 1;
632 			}
633 		}
634 	}
635 	return (rt);
636 }
637 
/*
 * State shared by the SMBIOS iteration callbacks (check_memdevice,
 * dimm_label) while matching SMB_TYPE_MEMDEVICE records to nb_dimms[].
 */
struct smb_dimm_rec {
	int dimms;		/* non-NULL entries found in nb_dimms[] */
	int slots;		/* memory-device records seen in SMBIOS */
	int populated;		/* SMBIOS records with non-zero size */
	nb_dimm_t **dimmpp;	/* next nb_dimms[] slot to be labeled */
};
644 
/*
 * smbios_iter() callback: copy each SMBIOS memory-device locator
 * string into the label of the next dimm in nb_dimms[], walking the
 * two in lock-step.  Returns -1 to stop the iteration once the dimm
 * table is exhausted, 0 otherwise.
 */
static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
	nb_dimm_t ***dimmpp;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	dimmpp = &rp->dimmpp;
	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		/* stop once every slot in nb_dimms[] has been visited */
		if (*dimmpp >= &nb_dimms[nb_dimm_slots])
			return (-1);
		dimmp = **dimmpp;
		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0 &&
		    md.smbmd_dloc != NULL) {
			if (md.smbmd_size) {
				/*
				 * Populated SMBIOS record with no
				 * matching dimm: when SMBIOS covers
				 * every controller slot, or reports
				 * more populated slots than dimms we
				 * found, the record cannot be matched
				 * to a dimm -- just skip this slot.
				 */
				if (dimmp == NULL &&
				    (rp->slots == nb_dimm_slots ||
				    rp->dimms < rp->populated)) {
					(*dimmpp)++;
					return (0);
				}
				/*
				 * if there is no physical dimm for this smbios
				 * record it is because this system has less
				 * physical slots than the controller supports
				 * so skip empty slots to find the slot this
				 * smbios record belongs too
				 */
				while (dimmp == NULL) {
					(*dimmpp)++;
					if (*dimmpp >= &nb_dimms[nb_dimm_slots])
						return (-1);
					dimmp = **dimmpp;
				}
				(void) snprintf(dimmp->label,
				    sizeof (dimmp->label), "%s", md.smbmd_dloc);
				(*dimmpp)++;
			}
		}
	}
	return (0);
}
688 
689 static int
690 check_memdevice(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
691 {
692 	struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
693 	smbios_memdevice_t md;
694 
695 	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
696 		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0) {
697 			rp->slots++;
698 			if (md.smbmd_size) {
699 				rp->populated++;
700 			}
701 		}
702 	}
703 	return (0);
704 }
705 
706 void
707 nb_smbios()
708 {
709 	struct smb_dimm_rec r;
710 	int i;
711 
712 	if (ksmbios != NULL && nb_no_smbios == 0) {
713 		r.dimms = 0;
714 		r.slots = 0;
715 		r.populated = 0;
716 		r.dimmpp = nb_dimms;
717 		for (i = 0; i < nb_dimm_slots; i++) {
718 			if (nb_dimms[i] != NULL)
719 				r.dimms++;
720 		}
721 		(void) smbios_iter(ksmbios, check_memdevice, &r);
722 		(void) smbios_iter(ksmbios, dimm_label, &r);
723 	}
724 }
725 
/*
 * Sun Blade X8450: the dimm index carries the channel in the high
 * bits and the slot in the low three; chassis labels interleave the
 * slots four ways across channels ("D<slot*4 + channel>").
 */
static void
x8450_dimm_label(int dimm, char *label, int label_sz)
{
	int channel = dimm >> 3;
	int slot = dimm & 0x7;

	(void) snprintf(label, label_sz, "D%d", slot * 4 + channel);
}
734 
735 /*
736  * CP3250 DIMM labels
737  * Channel   Dimm   Label
738  *       0      0      A0
739  *       1      0      B0
740  *       0      1      A1
741  *       1      1      B1
742  *       0      2      A2
743  *       1      2      B2
744  */
745 static void
746 cp3250_dimm_label(int dimm, char *label, int label_sz)
747 {
748 	int channel = dimm / nb_dimms_per_channel;
749 
750 	dimm = dimm % nb_dimms_per_channel;
751 	(void) snprintf(label, label_sz, "%c%d", channel == 0 ? 'A' : 'B',
752 	    dimm);
753 }
754 
755 /*
756  * Map the rank id to dimm id of a channel
757  * For the 5100 chipset, walk through the dimm list of channel the check if
758  * the given rank id is within the rank range assigned to the dimm.
759  * For other chipsets, the dimm is rank/2.
760  */
761 int
762 nb_rank2dimm(int channel, int rank)
763 {
764 	int i;
765 	nb_dimm_t **dimmpp = nb_dimms;
766 
767 	if (nb_chipset != INTEL_NB_5100)
768 		return (rank >> 1);
769 
770 	dimmpp += channel * nb_dimms_per_channel;
771 	for (i = 0; i < nb_dimms_per_channel; i++) {
772 		if ((rank >= dimmpp[i]->start_rank) &&
773 		    (rank < dimmpp[i]->start_rank + dimmpp[i]->nranks)) {
774 			return (i);
775 		}
776 	}
777 	return (-1);
778 }
779 
/*
 * Discover all DDR2 dimms (5100 chipset): record per-branch spare
 * rank state, probe each channel's slots, register dimm geometry,
 * and apply platform labels when a labeler was supplied.  Also
 * settles nb_mode (normal / spare-rank / single-channel).
 */
static void
nb_ddr2_dimms_init(find_dimm_label_t *label_function)
{
	int i, j;
	int start_rank;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	nb_dimm_slots = nb_number_memory_controllers * nb_channels_per_branch *
	    nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	nb_mode = NB_MEMORY_NORMAL;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		if (nb_mode == NB_MEMORY_NORMAL) {
			/* check whether a spare rank has been deployed */
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}

		/* The 1st dimm of a channel starts at rank 0 */
		start_rank = 0;

		for (j = 0; j < nb_dimms_per_channel; j++) {
			dimmpp[j] = nb_ddr2_dimm_init(i, j, start_rank);
			if (dimmpp[j]) {
				nb_ndimm ++;
				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
				    dimmpp[j]->width, dimmpp[j]->ncolumn,
				    dimmpp[j]->nrow);
				if (label_function) {
					label_function->label_function(
					    (i * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
				start_rank += dimmpp[j]->nranks;
				/*
				 * add an extra rank because
				 * single-ranked dimm still takes on two ranks.
				 */
				if (dimmpp[j]->nranks & 0x1)
					start_rank++;
				}
		}
		dimmpp += nb_dimms_per_channel;
	}

	/*
	 * single channel is supported.
	 */
	if (nb_ndimm > 0 && nb_ndimm <= nb_dimms_per_channel) {
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	}
}
840 
/*
 * Discover all FB-DIMMs: determine the memory mode from the MC/MCA
 * registers, record per-branch spare-rank state, then probe both
 * channels of every branch (channels k and k+1 of branch i).  The
 * second channel's dimms are stored nb_dimms_per_channel entries
 * after the first channel's in nb_dimms[].
 */
static void
nb_fbd_dimms_init(find_dimm_label_t *label_function)
{
	int i, j, k, l;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	/* two channels per branch */
	nb_dimm_slots = nb_number_memory_controllers * 2 * nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		if (nb_mode == NB_MEMORY_NORMAL) {
			/* check whether a spare rank has been deployed */
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			mtr = MTR_RD(i, j);
			/* k is the first channel of branch i */
			k = i * 2;
			dimmpp[j] = nb_fbd_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm ++;
				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
				    dimmpp[j]->width, dimmpp[j]->ncolumn,
				    dimmpp[j]->nrow);
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
			}
			/* same slot on the branch's second channel */
			dimmpp[j + nb_dimms_per_channel] =
			    nb_fbd_dimm_init(k + 1, j, mtr);
			l = j + nb_dimms_per_channel;
			if (dimmpp[l]) {
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + l,
					    dimmpp[l]->label,
					    sizeof (dimmpp[l]->label));
				}
				nb_ndimm ++;
			}
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
}
903 }
904 
905 static void
906 nb_dimms_init(find_dimm_label_t *label_function)
907 {
908 	if (nb_chipset == INTEL_NB_5100)
909 		nb_ddr2_dimms_init(label_function);
910 	else
911 		nb_fbd_dimms_init(label_function);
912 
913 	if (label_function == NULL)
914 		nb_smbios();
915 }
916 
/* Setup the ESI port registers to enable SERR for southbridge */
static void
nb_pex_init()
{
	int i = 0; /* ESI port */
	uint16_t regw;

	/* save the current settings so nb_pex_fini() can restore them */
	emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
	emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
	emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
	docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
	uncerrsev[i] = UNCERRSEV_RD(i);

	/* apply the tunable overrides when requested */
	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
	if (nb_chipset == INTEL_NB_5400) {
		/* disable masking of ERR pins used by DOCMD */
		PEX_ERR_PIN_MASK_WR(i, 0x10);
	}

	/* RP error message (CE/NFE/FE) detect mask */
	EMASK_RP_PEX_WR(i, nb5000_rp_pex);

	/* Command Register - Enable SERR */
	regw = nb_pci_getw(0, i, 0, PCI_CONF_COMM, 0);
	nb_pci_putw(0, i, 0, PCI_CONF_COMM,
	    regw | PCI_COMM_SERR_ENABLE);

	/* Root Control Register - SERR on NFE/FE */
	PEXROOTCTL_WR(i, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN);

	/* AER UE Mask - Mask UR */
	UNCERRMSK_WR(i, PCIE_AER_UCE_UR);
}
954 
/*
 * Restore the ESI port error registers saved by nb_pex_init(); the
 * tunable overrides (nb5000_reset_*_pex), when set, are re-applied
 * on top of the restored values.
 */
static void
nb_pex_fini()
{
	int i = 0; /* ESI port */

	EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
	EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
	EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
	PEX_ERR_DOCMD_WR(i, docmd_pex[i]);

	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
}
970 
/*
 * Program the chipset-internal error-detection registers: save the
 * BIOS-programmed values for restore at fini, mask everything while
 * reprogramming, then leave fatal errors routed to MCERR and
 * non-fatal errors masked there for the poller to handle.
 */
void
nb_int_init()
{
	uint32_t err0_int;
	uint32_t err1_int;
	uint32_t err2_int;
	uint32_t mcerr_int;
	uint32_t emask_int;
	uint16_t stepping;

	/* save the BIOS-programmed values for nb_int_fini() */
	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	/* mask everything while the registers are adjusted */
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	/*
	 * Fatal (bios-mask) errors stay routed to MCERR unless the
	 * BIOS already disabled them in ERR0/1/2; errors handled by
	 * the poller are masked from MCERR and from ERR0/1/2.
	 */
	mcerr_int &= ~nb5000_mask_bios_int;
	mcerr_int |= nb5000_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	mcerr_int |= nb5000_mask_poll_int;
	err0_int |= nb5000_mask_poll_int;
	err1_int |= nb5000_mask_poll_int;
	err2_int |= nb5000_mask_poll_int;

	l_mcerr_int = mcerr_int;	/* remember the programmed value */
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		if (nb_chipset == INTEL_NB_7300) {
			/* stepping 0 of the 7300 takes a different mask */
			stepping = NB5000_STEPPING();
			if (stepping == 0)
				EMASK_5000_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_5000_INT_WR(nb7300_emask_int);
		} else if (nb_chipset == INTEL_NB_5400) {
			/* preserve the 5400's reserved bits */
			EMASK_5400_INT_WR(nb5400_emask_int |
			    (emask_int & EMASK_INT_RES));
		} else {
			EMASK_5000_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}
1028 
/*
 * Restore the chipset-internal error-detection registers to the
 * values saved by nb_int_init(), masking everything first so no
 * errors are signaled mid-update.
 */
void
nb_int_fini()
{
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}
1044 
1045 void
1046 nb_int_mask_mc(uint32_t mc_mask_int)
1047 {
1048 	uint32_t emask_int;
1049 
1050 	emask_int = MCERR_INT_RD();
1051 	if ((emask_int & mc_mask_int) != mc_mask_int) {
1052 		MCERR_INT_WR(emask_int|mc_mask_int);
1053 		nb_mask_mc_set = 1;
1054 	}
1055 }
1056 
/*
 * Program the FB-DIMM error-detection registers: save the BIOS
 * values for restore at fini, mask everything while reprogramming,
 * then route fatal errors to MCERR and leave non-fatal errors to
 * the poller.
 */
static void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;
	uint32_t emask_bios_fbd;
	uint32_t emask_poll_fbd;

	/* save the BIOS-programmed values for nb_fbd_fini() */
	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	/* mask everything while the registers are adjusted */
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	/* select the chipset-specific fatal/non-fatal mask sets */
	if (nb_chipset == INTEL_NB_7300 && nb_mode == NB_MEMORY_MIRROR) {
		/* MCH 7300 errata 34 */
		emask_bios_fbd = nb5000_mask_bios_fbd & ~EMASK_FBD_M23;
		emask_poll_fbd = nb5000_mask_poll_fbd;
		mcerr_fbd |= EMASK_FBD_M23;
	} else if (nb_chipset == INTEL_NB_5400) {
		emask_bios_fbd = nb5400_mask_bios_fbd;
		emask_poll_fbd = nb5400_mask_poll_fbd;
	} else {
		emask_bios_fbd = nb5000_mask_bios_fbd;
		emask_poll_fbd = nb5000_mask_poll_fbd;
	}
	/*
	 * Fatal (bios-mask) errors stay routed to MCERR unless the
	 * BIOS already disabled them in ERR0/1/2; poll-mask errors
	 * are masked from MCERR and from ERR0/1/2.
	 */
	mcerr_fbd &= ~emask_bios_fbd;
	mcerr_fbd |= emask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= emask_poll_fbd;
	err0_fbd |= emask_poll_fbd;
	err1_fbd |= emask_poll_fbd;
	err2_fbd |= emask_poll_fbd;

	l_mcerr_fbd = mcerr_fbd;	/* remember the programmed value */
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd) {
		if (nb_chipset == INTEL_NB_5400)
			EMASK_FBD_WR(nb5400_emask_fbd);
		else
			EMASK_FBD_WR(nb5000_emask_fbd);
	} else {
		EMASK_FBD_WR(nb_emask_fbd);
	}
}
1119 
1120 void
1121 nb_fbd_mask_mc(uint32_t mc_mask_fbd)
1122 {
1123 	uint32_t emask_fbd;
1124 
1125 	emask_fbd = MCERR_FBD_RD();
1126 	if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
1127 		MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
1128 		nb_mask_mc_set = 1;
1129 	}
1130 }
1131 
/*
 * Undo nb_fbd_init(): write all-ones to each FBD error register, then
 * restore the values saved from the BIOS at init time.
 */
static void
nb_fbd_fini()
{
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	/* restore the BIOS settings captured in nb_fbd_init() */
	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}
1147 
/*
 * Set up memory (MEM) error detection.  Only the 5100 uses this
 * register set (see the call sites in nb_dev_init()/nb_dev_reinit());
 * the other chipsets use the FBD registers.  BIOS values are saved for
 * restoration in nb_mem_fini().
 */
static void
nb_mem_init()
{
	uint32_t err0_mem;
	uint32_t err1_mem;
	uint32_t err2_mem;
	uint32_t mcerr_mem;
	uint32_t emask_mem;
	uint32_t emask_poll_mem;

	/* capture the registers as the BIOS left them */
	err0_mem = ERR0_MEM_RD();
	err1_mem = ERR1_MEM_RD();
	err2_mem = ERR2_MEM_RD();
	mcerr_mem = MCERR_MEM_RD();
	emask_mem = EMASK_MEM_RD();

	/* saved for restoration in nb_mem_fini() */
	nb_err0_mem = err0_mem;
	nb_err1_mem = err1_mem;
	nb_err2_mem = err2_mem;
	nb_mcerr_mem = mcerr_mem;
	nb_emask_mem = emask_mem;

	/* write all-ones to every register before reprogramming */
	ERR0_MEM_WR(0xffffffff);
	ERR1_MEM_WR(0xffffffff);
	ERR2_MEM_WR(0xffffffff);
	MCERR_MEM_WR(0xffffffff);
	EMASK_MEM_WR(0xffffffff);

	/* unlike the FBD case there is no bios-handled set, only polled */
	emask_poll_mem = nb5100_mask_poll_mem;
	mcerr_mem |= emask_poll_mem;
	err0_mem |= emask_poll_mem;
	err1_mem |= emask_poll_mem;
	err2_mem |= emask_poll_mem;

	/* remember the programmed MCERR value for nb_mask_mc_reset() */
	l_mcerr_mem = mcerr_mem;
	ERR0_MEM_WR(err0_mem);
	ERR1_MEM_WR(err1_mem);
	ERR2_MEM_WR(err2_mem);
	MCERR_MEM_WR(mcerr_mem);
	if (nb5100_reset_emask_mem) {
		/* program EMASK as the complement of the poll mask */
		EMASK_MEM_WR(~nb5100_mask_poll_mem);
	} else {
		EMASK_MEM_WR(nb_emask_mem);
	}
}
1193 
1194 void
1195 nb_mem_mask_mc(uint32_t mc_mask_mem)
1196 {
1197 	uint32_t emask_mem;
1198 
1199 	emask_mem = MCERR_MEM_RD();
1200 	if ((emask_mem & mc_mask_mem) != mc_mask_mem) {
1201 		MCERR_MEM_WR(emask_mem|mc_mask_mem);
1202 		nb_mask_mc_set = 1;
1203 	}
1204 }
1205 
/*
 * Undo nb_mem_init(): write all-ones to each MEM error register, then
 * restore the values saved from the BIOS at init time.
 */
static void
nb_mem_fini()
{
	ERR0_MEM_WR(0xffffffff);
	ERR1_MEM_WR(0xffffffff);
	ERR2_MEM_WR(0xffffffff);
	MCERR_MEM_WR(0xffffffff);
	EMASK_MEM_WR(0xffffffff);

	/* restore the BIOS settings captured in nb_mem_init() */
	ERR0_MEM_WR(nb_err0_mem);
	ERR1_MEM_WR(nb_err1_mem);
	ERR2_MEM_WR(nb_err2_mem);
	MCERR_MEM_WR(nb_mcerr_mem);
	EMASK_MEM_WR(nb_emask_mem);
}
1221 
1222 static void
1223 nb_fsb_init()
1224 {
1225 	uint16_t err0_fsb;
1226 	uint16_t err1_fsb;
1227 	uint16_t err2_fsb;
1228 	uint16_t mcerr_fsb;
1229 	uint16_t emask_fsb;
1230 
1231 	err0_fsb = ERR0_FSB_RD(0);
1232 	err1_fsb = ERR1_FSB_RD(0);
1233 	err2_fsb = ERR2_FSB_RD(0);
1234 	mcerr_fsb = MCERR_FSB_RD(0);
1235 	emask_fsb = EMASK_FSB_RD(0);
1236 
1237 	ERR0_FSB_WR(0, 0xffff);
1238 	ERR1_FSB_WR(0, 0xffff);
1239 	ERR2_FSB_WR(0, 0xffff);
1240 	MCERR_FSB_WR(0, 0xffff);
1241 	EMASK_FSB_WR(0, 0xffff);
1242 
1243 	ERR0_FSB_WR(1, 0xffff);
1244 	ERR1_FSB_WR(1, 0xffff);
1245 	ERR2_FSB_WR(1, 0xffff);
1246 	MCERR_FSB_WR(1, 0xffff);
1247 	EMASK_FSB_WR(1, 0xffff);
1248 
1249 	nb_err0_fsb = err0_fsb;
1250 	nb_err1_fsb = err1_fsb;
1251 	nb_err2_fsb = err2_fsb;
1252 	nb_mcerr_fsb = mcerr_fsb;
1253 	nb_emask_fsb = emask_fsb;
1254 
1255 	mcerr_fsb &= ~nb5000_mask_bios_fsb;
1256 	mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
1257 	mcerr_fsb |= nb5000_mask_poll_fsb;
1258 	err0_fsb |= nb5000_mask_poll_fsb;
1259 	err1_fsb |= nb5000_mask_poll_fsb;
1260 	err2_fsb |= nb5000_mask_poll_fsb;
1261 
1262 	l_mcerr_fsb = mcerr_fsb;
1263 	ERR0_FSB_WR(0, err0_fsb);
1264 	ERR1_FSB_WR(0, err1_fsb);
1265 	ERR2_FSB_WR(0, err2_fsb);
1266 	MCERR_FSB_WR(0, mcerr_fsb);
1267 	if (nb5000_reset_emask_fsb) {
1268 		EMASK_FSB_WR(0, nb5000_emask_fsb);
1269 	} else {
1270 		EMASK_FSB_WR(0, nb_emask_fsb);
1271 	}
1272 
1273 	ERR0_FSB_WR(1, err0_fsb);
1274 	ERR1_FSB_WR(1, err1_fsb);
1275 	ERR2_FSB_WR(1, err2_fsb);
1276 	MCERR_FSB_WR(1, mcerr_fsb);
1277 	if (nb5000_reset_emask_fsb) {
1278 		EMASK_FSB_WR(1, nb5000_emask_fsb);
1279 	} else {
1280 		EMASK_FSB_WR(1, nb_emask_fsb);
1281 	}
1282 
1283 	if (nb_chipset == INTEL_NB_7300) {
1284 		ERR0_FSB_WR(2, 0xffff);
1285 		ERR1_FSB_WR(2, 0xffff);
1286 		ERR2_FSB_WR(2, 0xffff);
1287 		MCERR_FSB_WR(2, 0xffff);
1288 		EMASK_FSB_WR(2, 0xffff);
1289 
1290 		ERR0_FSB_WR(3, 0xffff);
1291 		ERR1_FSB_WR(3, 0xffff);
1292 		ERR2_FSB_WR(3, 0xffff);
1293 		MCERR_FSB_WR(3, 0xffff);
1294 		EMASK_FSB_WR(3, 0xffff);
1295 
1296 		ERR0_FSB_WR(2, err0_fsb);
1297 		ERR1_FSB_WR(2, err1_fsb);
1298 		ERR2_FSB_WR(2, err2_fsb);
1299 		MCERR_FSB_WR(2, mcerr_fsb);
1300 		if (nb5000_reset_emask_fsb) {
1301 			EMASK_FSB_WR(2, nb5000_emask_fsb);
1302 		} else {
1303 			EMASK_FSB_WR(2, nb_emask_fsb);
1304 		}
1305 
1306 		ERR0_FSB_WR(3, err0_fsb);
1307 		ERR1_FSB_WR(3, err1_fsb);
1308 		ERR2_FSB_WR(3, err2_fsb);
1309 		MCERR_FSB_WR(3, mcerr_fsb);
1310 		if (nb5000_reset_emask_fsb) {
1311 			EMASK_FSB_WR(3, nb5000_emask_fsb);
1312 		} else {
1313 			EMASK_FSB_WR(3, nb_emask_fsb);
1314 		}
1315 	}
1316 }
1317 
1318 static void
1319 nb_fsb_fini() {
1320 	ERR0_FSB_WR(0, 0xffff);
1321 	ERR1_FSB_WR(0, 0xffff);
1322 	ERR2_FSB_WR(0, 0xffff);
1323 	MCERR_FSB_WR(0, 0xffff);
1324 	EMASK_FSB_WR(0, 0xffff);
1325 
1326 	ERR0_FSB_WR(0, nb_err0_fsb);
1327 	ERR1_FSB_WR(0, nb_err1_fsb);
1328 	ERR2_FSB_WR(0, nb_err2_fsb);
1329 	MCERR_FSB_WR(0, nb_mcerr_fsb);
1330 	EMASK_FSB_WR(0, nb_emask_fsb);
1331 
1332 	ERR0_FSB_WR(1, 0xffff);
1333 	ERR1_FSB_WR(1, 0xffff);
1334 	ERR2_FSB_WR(1, 0xffff);
1335 	MCERR_FSB_WR(1, 0xffff);
1336 	EMASK_FSB_WR(1, 0xffff);
1337 
1338 	ERR0_FSB_WR(1, nb_err0_fsb);
1339 	ERR1_FSB_WR(1, nb_err1_fsb);
1340 	ERR2_FSB_WR(1, nb_err2_fsb);
1341 	MCERR_FSB_WR(1, nb_mcerr_fsb);
1342 	EMASK_FSB_WR(1, nb_emask_fsb);
1343 
1344 	if (nb_chipset == INTEL_NB_7300) {
1345 		ERR0_FSB_WR(2, 0xffff);
1346 		ERR1_FSB_WR(2, 0xffff);
1347 		ERR2_FSB_WR(2, 0xffff);
1348 		MCERR_FSB_WR(2, 0xffff);
1349 		EMASK_FSB_WR(2, 0xffff);
1350 
1351 		ERR0_FSB_WR(2, nb_err0_fsb);
1352 		ERR1_FSB_WR(2, nb_err1_fsb);
1353 		ERR2_FSB_WR(2, nb_err2_fsb);
1354 		MCERR_FSB_WR(2, nb_mcerr_fsb);
1355 		EMASK_FSB_WR(2, nb_emask_fsb);
1356 
1357 		ERR0_FSB_WR(3, 0xffff);
1358 		ERR1_FSB_WR(3, 0xffff);
1359 		ERR2_FSB_WR(3, 0xffff);
1360 		MCERR_FSB_WR(3, 0xffff);
1361 		EMASK_FSB_WR(3, 0xffff);
1362 
1363 		ERR0_FSB_WR(3, nb_err0_fsb);
1364 		ERR1_FSB_WR(3, nb_err1_fsb);
1365 		ERR2_FSB_WR(3, nb_err2_fsb);
1366 		MCERR_FSB_WR(3, nb_mcerr_fsb);
1367 		EMASK_FSB_WR(3, nb_emask_fsb);
1368 	}
1369 }
1370 
1371 void
1372 nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
1373 {
1374 	uint16_t emask_fsb;
1375 
1376 	emask_fsb = MCERR_FSB_RD(fsb);
1377 	if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
1378 		MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
1379 		nb_mask_mc_set = 1;
1380 	}
1381 }
1382 
/*
 * Set up thermal (THR) error detection.  The THR registers exist only
 * on the 5400; this is a no-op on every other chipset.  BIOS values
 * are saved for restoration in nb_thr_fini().
 */
static void
nb_thr_init()
{
	uint16_t err0_thr;
	uint16_t err1_thr;
	uint16_t err2_thr;
	uint16_t mcerr_thr;
	uint16_t emask_thr;

	if (nb_chipset == INTEL_NB_5400) {
		/* capture the registers as the BIOS left them */
		err0_thr = ERR0_THR_RD(0);
		err1_thr = ERR1_THR_RD(0);
		err2_thr = ERR2_THR_RD(0);
		mcerr_thr = MCERR_THR_RD(0);
		emask_thr = EMASK_THR_RD(0);

		/* write all-ones to every register before reprogramming */
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* saved for restoration in nb_thr_fini() */
		nb_err0_thr = err0_thr;
		nb_err1_thr = err1_thr;
		nb_err2_thr = err2_thr;
		nb_mcerr_thr = mcerr_thr;
		nb_emask_thr = emask_thr;

		/*
		 * Split reporting between MCERR and the polled ERR0/1/2
		 * registers (same pattern as nb_fbd_init()/nb_fsb_init()).
		 */
		mcerr_thr &= ~nb_mask_bios_thr;
		mcerr_thr |= nb_mask_bios_thr &
		    (~err2_thr | ~err1_thr | ~err0_thr);
		mcerr_thr |= nb_mask_poll_thr;
		err0_thr |= nb_mask_poll_thr;
		err1_thr |= nb_mask_poll_thr;
		err2_thr |= nb_mask_poll_thr;

		/* remember the programmed MCERR for nb_mask_mc_reset() */
		l_mcerr_thr = mcerr_thr;
		ERR0_THR_WR(err0_thr);
		ERR1_THR_WR(err1_thr);
		ERR2_THR_WR(err2_thr);
		MCERR_THR_WR(mcerr_thr);
		/* unlike FBD/FSB, the BIOS EMASK is always restored */
		EMASK_THR_WR(nb_emask_thr);
	}
}
1427 
/*
 * Undo nb_thr_init(): write all-ones to each THR error register, then
 * restore the values saved from the BIOS.  5400-only, matching
 * nb_thr_init().
 */
static void
nb_thr_fini()
{
	if (nb_chipset == INTEL_NB_5400) {
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* restore the BIOS settings captured in nb_thr_init() */
		ERR0_THR_WR(nb_err0_thr);
		ERR1_THR_WR(nb_err1_thr);
		ERR2_THR_WR(nb_err2_thr);
		MCERR_THR_WR(nb_mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1445 
1446 void
1447 nb_thr_mask_mc(uint16_t mc_mask_thr)
1448 {
1449 	uint16_t emask_thr;
1450 
1451 	emask_thr = MCERR_THR_RD(0);
1452 	if ((emask_thr & mc_mask_thr) != mc_mask_thr) {
1453 		MCERR_THR_WR(emask_thr|mc_mask_thr);
1454 		nb_mask_mc_set = 1;
1455 	}
1456 }
1457 
1458 void
1459 nb_mask_mc_reset()
1460 {
1461 	if (nb_chipset == INTEL_NB_5100)
1462 		MCERR_MEM_WR(l_mcerr_mem);
1463 	else
1464 		MCERR_FBD_WR(l_mcerr_fbd);
1465 	MCERR_INT_WR(l_mcerr_int);
1466 	MCERR_FSB_WR(0, l_mcerr_fsb);
1467 	MCERR_FSB_WR(1, l_mcerr_fsb);
1468 	if (nb_chipset == INTEL_NB_7300) {
1469 		MCERR_FSB_WR(2, l_mcerr_fsb);
1470 		MCERR_FSB_WR(3, l_mcerr_fsb);
1471 	}
1472 	if (nb_chipset == INTEL_NB_5400) {
1473 		MCERR_THR_WR(l_mcerr_thr);
1474 	}
1475 }
1476 
/*
 * Driver device init: discover the DIMM topology, create the error
 * queue, and program every north-bridge error-detection register set.
 * Returns 0 on success or EAGAIN if the error queue cannot be created.
 */
int
nb_dev_init()
{
	find_dimm_label_t *label_function_p;

	label_function_p = find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		/* undo the mutex_init; nothing else is set up yet */
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	nb_int_init();
	nb_thr_init();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	/* the 5100 uses the MEM register set; all others use FBD */
	if (nb_chipset == INTEL_NB_5100)
		nb_mem_init();
	else
		nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}
1504 
1505 int
1506 nb_init()
1507 {
1508 	/* return ENOTSUP if there is no PCI config space support. */
1509 	if (pci_getl_func == NULL)
1510 		return (ENOTSUP);
1511 
1512 	/* get vendor and device */
1513 	nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
1514 	switch (nb_chipset) {
1515 	default:
1516 		if (nb_5000_memory_controller == 0)
1517 			return (ENOTSUP);
1518 		break;
1519 	case INTEL_NB_7300:
1520 	case INTEL_NB_5000P:
1521 	case INTEL_NB_5000X:
1522 		break;
1523 	case INTEL_NB_5000V:
1524 	case INTEL_NB_5000Z:
1525 		nb_number_memory_controllers = 1;
1526 		break;
1527 	case INTEL_NB_5100:
1528 		nb_channels_per_branch = 1;
1529 		break;
1530 	case INTEL_NB_5400:
1531 	case INTEL_NB_5400A:
1532 	case INTEL_NB_5400B:
1533 		nb_chipset = INTEL_NB_5400;
1534 		break;
1535 	}
1536 	return (0);
1537 }
1538 
1539 void
1540 nb_dev_reinit()
1541 {
1542 	int i, j;
1543 	int nchannels = nb_number_memory_controllers * 2;
1544 	nb_dimm_t **dimmpp;
1545 	nb_dimm_t *dimmp;
1546 	nb_dimm_t **old_nb_dimms;
1547 	int old_nb_dimms_per_channel;
1548 	find_dimm_label_t *label_function_p;
1549 	int dimm_slot = nb_dimm_slots;
1550 
1551 	old_nb_dimms = nb_dimms;
1552 	old_nb_dimms_per_channel = nb_dimms_per_channel;
1553 
1554 	dimm_fini();
1555 	nb_dimms_per_channel = 0;
1556 	label_function_p = find_dimms_per_channel();
1557 	dimm_init();
1558 	nb_dimms_init(label_function_p);
1559 	nb_mc_init();
1560 	nb_pex_init();
1561 	nb_int_init();
1562 	nb_thr_init();
1563 	if (nb_chipset == INTEL_NB_5100)
1564 		nb_mem_init();
1565 	else
1566 		nb_fbd_init();
1567 	nb_fsb_init();
1568 	nb_scrubber_enable();
1569 
1570 	dimmpp = old_nb_dimms;
1571 	for (i = 0; i < nchannels; i++) {
1572 		for (j = 0; j < old_nb_dimms_per_channel; j++) {
1573 			dimmp = *dimmpp;
1574 			if (dimmp) {
1575 				kmem_free(dimmp, sizeof (nb_dimm_t));
1576 				*dimmpp = NULL;
1577 			}
1578 			dimmp++;
1579 		}
1580 	}
1581 	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) * dimm_slot);
1582 }
1583 
/*
 * Driver teardown: destroy the error queue and restore every
 * north-bridge error-detection register set to its saved BIOS values
 * (the reverse of nb_dev_init()).
 */
void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_thr_fini();
	/* the 5100 uses the MEM register set; all others use FBD */
	if (nb_chipset == INTEL_NB_5100)
		nb_mem_fini();
	else
		nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}
1600 
/*
 * Module unload hook.  Intentionally empty: all teardown is performed
 * by nb_dev_unload().
 */
void
nb_unload()
{
}
1605