xref: /illumos-gate/usr/src/uts/intel/io/intel_nb5000/nb5000_init.c (revision c9eab9d4e096bb9b983e9b007577edfa73c32eff)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include <sys/pci.h>
44 #include <sys/pcie.h>
45 #include "nb5000.h"
46 #include "nb_log.h"
47 #include "dimm_phys.h"
48 #include "rank.h"
49 
50 int nb_hw_memory_scrub_enable = 1;
51 static int nb_sw_scrub_disabled = 0;
52 
53 int nb_5000_memory_controller = 0;
54 int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
55 int nb_dimms_per_channel = 0;
56 
57 nb_dimm_t **nb_dimms;
58 int nb_ndimm;
59 uint32_t nb_chipset;
60 enum nb_memory_mode nb_mode;
61 bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
62 rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
63 uint32_t top_of_low_memory;
64 uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];
65 
66 extern int nb_no_smbios;
67 
68 errorq_t *nb_queue;
69 kmutex_t nb_mutex;
70 
71 static int nb_dimm_slots;
72 
73 static uint8_t nb_err0_int;
74 static uint8_t nb_err1_int;
75 static uint8_t nb_err2_int;
76 static uint8_t nb_mcerr_int;
77 static uint32_t nb_emask_int;
78 
79 static uint32_t nb_err0_fbd;
80 static uint32_t nb_err1_fbd;
81 static uint32_t nb_err2_fbd;
82 static uint32_t nb_mcerr_fbd;
83 static uint32_t nb_emask_fbd;
84 
85 static uint16_t nb_err0_fsb;
86 static uint16_t nb_err1_fsb;
87 static uint16_t nb_err2_fsb;
88 static uint16_t nb_mcerr_fsb;
89 static uint16_t nb_emask_fsb;
90 
91 static uint16_t nb_err0_thr;
92 static uint16_t nb_err1_thr;
93 static uint16_t nb_err2_thr;
94 static uint16_t nb_mcerr_thr;
95 static uint16_t nb_emask_thr;
96 
97 static uint32_t	emask_uncor_pex[NB_PCI_DEV];
98 static uint32_t emask_cor_pex[NB_PCI_DEV];
99 static uint32_t emask_rp_pex[NB_PCI_DEV];
100 static uint32_t docmd_pex[NB_PCI_DEV];
101 static uint32_t uncerrsev[NB_PCI_DEV];
102 
103 static uint8_t l_mcerr_int;
104 static uint32_t l_mcerr_fbd;
105 static uint16_t l_mcerr_fsb;
106 static uint16_t l_mcerr_thr;
107 
108 uint_t nb5000_emask_fbd = EMASK_5000_FBD_RES;
109 uint_t nb5400_emask_fbd = 0;
110 int nb5000_reset_emask_fbd = 1;
111 uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
112 uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;
113 uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
114 uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;
115 
116 uint_t nb5000_emask_fsb = 0;
117 int nb5000_reset_emask_fsb = 1;
118 uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
119 uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;
120 
121 uint_t nb5400_emask_int = 0;
122 
123 uint_t nb7300_emask_int = EMASK_INT_7300;
124 uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
125 uint_t nb5000_emask_int = EMASK_INT_5000;
126 int nb5000_reset_emask_int = 1;
127 uint_t nb5000_mask_poll_int = EMASK_INT_NF;
128 uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;
129 
130 uint_t nb_mask_poll_thr = EMASK_THR_NF;
131 uint_t nb_mask_bios_thr = EMASK_THR_FATAL;
132 
133 int nb5000_reset_uncor_pex = 0;
134 uint_t nb5000_mask_uncor_pex = 0;
135 int nb5000_reset_cor_pex = 0;
136 uint_t nb5000_mask_cor_pex = 0xffffffff;
137 int nb_set_docmd = 1;
138 uint32_t nb5000_rp_pex = 0x1;
139 uint32_t nb5000_docmd_pex_mask = DOCMD_PEX_MASK;
140 uint32_t nb5400_docmd_pex_mask = DOCMD_5400_PEX_MASK;
141 uint32_t nb5000_docmd_pex = DOCMD_PEX;
142 uint32_t nb5400_docmd_pex = DOCMD_5400_PEX;
143 
144 int nb_mask_mc_set;
145 
146 typedef struct find_dimm_label {
147 	void (*label_function)(int, char *, int);
148 } find_dimm_label_t;
149 
150 static void x8450_dimm_label(int, char *, int);
151 
152 static struct platform_label {
153 	const char *sys_vendor;		/* SMB_TYPE_SYSTEM vendor prefix */
154 	const char *sys_product;	/* SMB_TYPE_SYSTEM product prefix */
155 	find_dimm_label_t dimm_label;
156 	int dimms_per_channel;
157 } platform_label[] = {
158 	{ "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
159 	    x8450_dimm_label, 8 },
160 	{ NULL, NULL, NULL, 0 }
161 };
162 
/*
 * Read the SPD status/data register for a memory channel.  The bus
 * number encodes the branch in its upper bits and the channel within
 * the branch in bit 0.
 */
static unsigned short
read_spd(int bus)
{
	return (SPD_RD(bus >> 1, bus & 1));
}
174 
/*
 * Issue an SPD command on a memory channel by writing the SPDCMD
 * register of the corresponding branch/channel pair.
 */
static void
write_spdcmd(int bus, uint32_t val)
{
	SPDCMD_WR(bus >> 1, bus & 1, val);
}
182 
183 static int
184 read_spd_eeprom(int bus, int slave, int addr)
185 {
186 	int retry = 4;
187 	int wait;
188 	int spd;
189 	uint32_t cmd;
190 
191 	for (;;) {
192 		wait = 1000;
193 		for (;;) {
194 			spd = read_spd(bus);
195 			if ((spd & SPD_BUSY) == 0)
196 				break;
197 			if (--wait == 0)
198 				return (-1);
199 			drv_usecwait(10);
200 		}
201 		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
202 		write_spdcmd(bus, cmd);
203 		wait = 1000;
204 		for (;;) {
205 			spd = read_spd(bus);
206 			if ((spd & SPD_BUSY) == 0)
207 				break;
208 			if (--wait == 0) {
209 				spd = SPD_BUS_ERROR;
210 				break;
211 			}
212 			drv_usecwait(10);
213 		}
214 		while ((spd & SPD_BUS_ERROR) == 0 &&
215 		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
216 		    SPD_READ_DATA_VALID) {
217 			spd = read_spd(bus);
218 			if (--wait == 0)
219 				return (-1);
220 		}
221 		if ((spd & SPD_BUS_ERROR) == 0)
222 			break;
223 		if (--retry == 0)
224 			return (-1);
225 	}
226 	return (spd & 0xff);
227 }
228 
229 static void
230 nb_fini()
231 {
232 	int i, j;
233 	int nchannels = nb_number_memory_controllers * 2;
234 	nb_dimm_t **dimmpp;
235 	nb_dimm_t *dimmp;
236 
237 	dimmpp = nb_dimms;
238 	for (i = 0; i < nchannels; i++) {
239 		for (j = 0; j < nb_dimms_per_channel; j++) {
240 			dimmp = *dimmpp;
241 			if (dimmp) {
242 				kmem_free(dimmp, sizeof (nb_dimm_t));
243 				*dimmpp = NULL;
244 			}
245 			dimmp++;
246 		}
247 	}
248 	kmem_free(nb_dimms, sizeof (nb_dimm_t *) * nb_dimm_slots);
249 	nb_dimms = NULL;
250 	dimm_fini();
251 }
252 
253 void
254 nb_scrubber_enable()
255 {
256 	uint32_t mc;
257 
258 	if (!nb_hw_memory_scrub_enable)
259 		return;
260 
261 	mc = MC_RD();
262 	if ((mc & MC_MIRROR) != 0) /* mirror mode */
263 		mc |= MC_PATROL_SCRUB;
264 	else
265 		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
266 	MC_WR(mc);
267 
268 	if (nb_sw_scrub_disabled++)
269 		cmi_mc_sw_memscrub_disable();
270 }
271 
/*
 * Build an nb_dimm_t describing one DIMM slot from the MTR register
 * and the DIMM's SPD EEPROM.  Returns NULL if the slot is empty or
 * the SPD does not describe an FB-DIMM; otherwise returns a
 * kmem_zalloc'd nb_dimm_t that the caller must eventually free.
 */
static nb_dimm_t *
nb_dimm_init(int channel, int dimm, uint16_t mtr)
{
	nb_dimm_t *dp;
	int i, t;
	int spd_sz;

	if (MTR_PRESENT(mtr) == 0)
		return (NULL);
	/* SPD byte 2 is the memory type; 9 denotes DDR2 FB-DIMM */
	t = read_spd_eeprom(channel, dimm, 2) & 0xf;

	if (t != 9)
		return (NULL);

	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);

	/* SPD byte 0 low nibble encodes the size of the SPD image */
	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
	if (t == 1)
		spd_sz = 128;
	else if (t == 2)
		spd_sz = 176;
	else
		spd_sz = 256;
	/* identity fields at fixed FB-DIMM SPD offsets */
	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
	    (read_spd_eeprom(channel, dimm, 118) << 8);
	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
	dp->serial_number =
	    (read_spd_eeprom(channel, dimm, 122) << 24) |
	    (read_spd_eeprom(channel, dimm, 123) << 16) |
	    (read_spd_eeprom(channel, dimm, 124) << 8) |
	    read_spd_eeprom(channel, dimm, 125);
	/* manufacture week is BCD encoded */
	t = read_spd_eeprom(channel, dimm, 121);
	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
	/* part number and revision only exist in the larger SPD images */
	if (spd_sz > 128) {
		for (i = 0; i < sizeof (dp->part_number); i++) {
			dp->part_number[i] =
			    read_spd_eeprom(channel, dimm, 128 + i);
		}
		for (i = 0; i < sizeof (dp->revision); i++) {
			dp->revision[i] =
			    read_spd_eeprom(channel, dimm, 146 + i);
		}
	}
	/* geometry comes from the chipset MTR register, not the SPD */
	dp->mtr_present = MTR_PRESENT(mtr);
	dp->nranks = MTR_NUMRANK(mtr);
	dp->nbanks = MTR_NUMBANK(mtr);
	dp->ncolumn = MTR_NUMCOL(mtr);
	dp->nrow = MTR_NUMROW(mtr);
	dp->width = MTR_WIDTH(mtr);
	dp->dimm_size = MTR_DIMMSIZE(mtr);

	return (dp);
}
326 
327 static uint64_t
328 mc_range(int controller, uint64_t base)
329 {
330 	int i;
331 	uint64_t limit = 0;
332 
333 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
334 		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
335 		    base < nb_banks[i].limit) {
336 			limit = nb_banks[i].limit;
337 			if (base <= top_of_low_memory &&
338 			    limit > top_of_low_memory) {
339 				limit -= TLOW_MAX - top_of_low_memory;
340 			}
341 			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
342 			    nb_mode != NB_MEMORY_MIRROR) {
343 				limit = limit / 2;
344 			}
345 		}
346 	}
347 	return (limit);
348 }
349 
350 void
351 nb_mc_init()
352 {
353 	uint16_t tolm;
354 	uint16_t mir;
355 	uint32_t hole_base;
356 	uint32_t hole_size;
357 	uint32_t dmir;
358 	uint64_t base;
359 	uint64_t limit;
360 	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
361 	int i, j, k;
362 	uint8_t interleave;
363 
364 	base = 0;
365 	tolm = TOLM_RD();
366 	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
367 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
368 		mir = MIR_RD(i);
369 		limit = (uint64_t)(mir >> 4) << 28;
370 		way0 = mir & 1;
371 		way1 = (mir >> 1) & 1;
372 		if (way0 == 0 && way1 == 0) {
373 			way0 = 1;
374 			way1 = 1;
375 		}
376 		if (limit > top_of_low_memory)
377 			limit += TLOW_MAX - top_of_low_memory;
378 		nb_banks[i].base = base;
379 		nb_banks[i].limit = limit;
380 		nb_banks[i].way[0] = way0;
381 		nb_banks[i].way[1] = way1;
382 		base = limit;
383 	}
384 	for (i = 0; i < nb_number_memory_controllers; i++) {
385 		base = 0;
386 
387 		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
388 			dmir = DMIR_RD(i, j);
389 			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
390 			if (limit == 0) {
391 				limit = mc_range(i, base);
392 			}
393 			branch_interleave = 0;
394 			hole_base = 0;
395 			hole_size = 0;
396 			DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
397 			if (rank0 == rank1)
398 				interleave = 1;
399 			else if (rank0 == rank2)
400 				interleave = 2;
401 			else
402 				interleave = 4;
403 			if (nb_mode != NB_MEMORY_MIRROR &&
404 			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
405 				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
406 					if (base >= nb_banks[k].base &&
407 					    base < nb_banks[k].limit) {
408 						if (nb_banks[i].way[0] &&
409 						    nb_banks[i].way[1]) {
410 							interleave *= 2;
411 							limit *= 2;
412 							branch_interleave = 1;
413 						}
414 						break;
415 					}
416 				}
417 			}
418 			if (base < top_of_low_memory &&
419 			    limit > top_of_low_memory) {
420 				hole_base = top_of_low_memory;
421 				hole_size = TLOW_MAX - top_of_low_memory;
422 				limit += hole_size;
423 			} else if (base > top_of_low_memory) {
424 				limit += TLOW_MAX - top_of_low_memory;
425 			}
426 			nb_ranks[i][j].base = base;
427 			nb_ranks[i][j].limit = limit;
428 			nb_ranks[i][j].rank[0] = rank0;
429 			nb_ranks[i][j].rank[1] = rank1;
430 			nb_ranks[i][j].rank[2] = rank2;
431 			nb_ranks[i][j].rank[3] = rank3;
432 			nb_ranks[i][j].interleave = interleave;
433 			nb_ranks[i][j].branch_interleave = branch_interleave;
434 			nb_ranks[i][j].hole_base = hole_base;
435 			nb_ranks[i][j].hole_size = hole_size;
436 			if (limit > base) {
437 				dimm_add_rank(i, rank0, branch_interleave, 0,
438 				    base, hole_base, hole_size, interleave,
439 				    limit);
440 				if (rank0 != rank1) {
441 					dimm_add_rank(i, rank1,
442 					    branch_interleave, 1, base,
443 					    hole_base, hole_size, interleave,
444 					    limit);
445 					if (rank0 != rank2) {
446 						dimm_add_rank(i, rank2,
447 						    branch_interleave, 2, base,
448 						    hole_base, hole_size,
449 						    interleave, limit);
450 						dimm_add_rank(i, rank3,
451 						    branch_interleave, 3, base,
452 						    hole_base, hole_size,
453 						    interleave, limit);
454 					}
455 				}
456 			}
457 			base = limit;
458 		}
459 	}
460 }
461 
462 void
463 nb_used_spare_rank(int branch, int bad_rank)
464 {
465 	int i;
466 	int j;
467 
468 	for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
469 		for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
470 			if (nb_ranks[branch][i].rank[j] == bad_rank) {
471 				nb_ranks[branch][i].rank[j] =
472 				    spare_rank[branch];
473 				i = NB_MEM_RANK_SELECT;
474 				break;
475 			}
476 		}
477 	}
478 }
479 
/*
 * Determine nb_dimms_per_channel and, when the platform is known,
 * return its DIMM-labelling function.  First consult the SMBIOS
 * system record against the platform_label table; if no entry
 * matches (or SMBIOS is unavailable/disabled), fall back to probing
 * the MTR registers for the highest populated slot on any channel.
 * Returns the matched platform's label function, or NULL.
 */
find_dimm_label_t *
find_dimms_per_channel()
{
	struct platform_label *pl;
	smbios_info_t si;
	smbios_system_t sy;
	id_t id;
	int i, j;
	uint16_t mtr;
	find_dimm_label_t *rt = NULL;

	if (ksmbios != NULL && nb_no_smbios == 0) {
		if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
		    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
			/* prefix match on vendor and product strings */
			for (pl = platform_label; pl->sys_vendor; pl++) {
				if (strncmp(pl->sys_vendor,
				    si.smbi_manufacturer,
				    strlen(pl->sys_vendor)) == 0 &&
				    strncmp(pl->sys_product, si.smbi_product,
				    strlen(pl->sys_product)) == 0) {
					nb_dimms_per_channel =
					    pl->dimms_per_channel;
					rt = &pl->dimm_label;
					break;
				}
			}
		}
	}
	if (nb_dimms_per_channel == 0) {
		/*
		 * Scan all memory channels if we find a channel which has more
		 * dimms then we have seen before set nb_dimms_per_channel to
		 * the number of dimms on the channel
		 */
		for (i = 0; i < nb_number_memory_controllers; i++) {
			for (j = nb_dimms_per_channel;
			    j < NB_MAX_DIMMS_PER_CHANNEL; j++) {
				mtr = MTR_RD(i, j);
				if (MTR_PRESENT(mtr))
					nb_dimms_per_channel = j + 1;
			}
		}
	}
	return (rt);
}
525 
/*
 * smbios_iter() callback used by nb_smbios() to copy SMBIOS
 * memory-device location strings into the nb_dimms slot table.
 * arg is a pointer to a cursor (nb_dimm_t **) that advances through
 * nb_dimms as populated memory-device records are consumed.
 * Returns -1 to stop iteration once the cursor passes the end of
 * the slot array, 0 to continue.
 */
static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	nb_dimm_t ***dimmpp = arg;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		if (*dimmpp >= &nb_dimms[nb_dimm_slots])
			return (-1);
		dimmp = **dimmpp;
		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0 &&
		    md.smbmd_dloc != NULL) {
			/* smbmd_size != 0 means the slot is populated */
			if (md.smbmd_size) {
				/*
				 * if there is no physical dimm for this smbios
				 * record it is because this system has less
				 * physical slots than the controller supports
				 * so skip empty slots to find the slot this
				 * smbios record belongs too
				 */
				while (dimmp == NULL) {
					if (*dimmpp >= &nb_dimms[nb_dimm_slots])
						return (-1);
					dimmp = *(++(*dimmpp));
				}
				(void) snprintf(dimmp->label,
				    sizeof (dimmp->label), "%s", md.smbmd_dloc);
				(*dimmpp)++;
			}
		}
	}
	return (0);
}
560 
561 void
562 nb_smbios()
563 {
564 	nb_dimm_t **dimmpp;
565 
566 	if (ksmbios != NULL && nb_no_smbios == 0) {
567 		dimmpp = nb_dimms;
568 		(void) smbios_iter(ksmbios, dimm_label, &dimmpp);
569 	}
570 }
571 
/*
 * Generate the chassis silkscreen label for a DIMM slot on the Sun
 * Blade X8450.  The logical index encodes the channel in bits 3 and
 * above and the slot within the channel in bits 0-2; the silkscreen
 * numbers slots "D<n>" interleaved by four.
 */
static void
x8450_dimm_label(int dimm, char *label, int label_sz)
{
	int channel = dimm >> 3;
	int slot = dimm & 0x7;

	(void) snprintf(label, label_sz, "D%d", slot * 4 + channel);
}
580 
/*
 * Discover the memory configuration and populate the nb_dimms slot
 * array.  Detects the memory mode (single-channel / mirror / normal /
 * spare-rank), allocates one slot pointer per possible DIMM, and
 * builds an nb_dimm_t per populated slot, labelling it either via the
 * platform label function or, failing that, from SMBIOS.
 */
static void
nb_dimms_init(find_dimm_label_t *label_function)
{
	int i, j, k, l;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	/* two channels per controller (branch) */
	nb_dimm_slots = nb_number_memory_controllers * 2 * nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		/* check whether a spare rank has been configured/deployed */
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			/* MTR describes the DIMM pair on both channels */
			mtr = MTR_RD(i, j);
			k = i * 2;	/* first channel of this branch */
			dimmpp[j] = nb_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm ++;
				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
				    dimmpp[j]->width, dimmpp[j]->ncolumn,
				    dimmpp[j]->nrow);
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
			}
			/* second channel of the branch, same MTR */
			dimmpp[j + nb_dimms_per_channel] =
			    nb_dimm_init(k + 1, j, mtr);
			l = j + nb_dimms_per_channel;
			if (dimmpp[l]) {
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + l,
					    dimmpp[l]->label,
					    sizeof (dimmpp[l]->label));
				}
				nb_ndimm ++;
			}
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
	/* with no platform-specific labeller, fall back to SMBIOS labels */
	if (label_function == NULL)
		nb_smbios();
}
646 
647 
/*
 * Setup the ESI port registers to enable SERR for southbridge.
 * The BIOS values of all modified registers are saved first so
 * nb_pex_fini() can restore them at unload.
 */
static void
nb_pex_init()
{
	int i = 0; /* ESI port */
	uint32_t mask;
	uint16_t regw;

	/* save BIOS settings for nb_pex_fini() */
	emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
	emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
	emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
	docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
	uncerrsev[i] = UNCERRSEV_RD(i);

	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
	if (nb_set_docmd) {
		if (nb_chipset == INTEL_NB_5400) {
			/* disable masking of ERR pins used by DOCMD */
			PEX_ERR_PIN_MASK_WR(i, 0x10);

			/* merge tunable DOCMD bits into the BIOS value */
			mask = (docmd_pex[i] & nb5400_docmd_pex_mask) |
			    (nb5400_docmd_pex & ~nb5400_docmd_pex_mask);
		} else {
			mask = (docmd_pex[i] & nb5000_docmd_pex_mask) |
			    (nb5000_docmd_pex & ~nb5000_docmd_pex_mask);
		}
		PEX_ERR_DOCMD_WR(i, mask);
	}

	/* RP error message (CE/NFE/FE) detect mask */
	EMASK_RP_PEX_WR(i, nb5000_rp_pex);

	/* Command Register - Enable SERR */
	regw = nb_pci_getw(0, i, 0, PCI_CONF_COMM, 0);
	nb_pci_putw(0, i, 0, PCI_CONF_COMM,
	    regw | PCI_COMM_SERR_ENABLE);

	/* Root Control Register - SERR on NFE/FE */
	PEXROOTCTL_WR(i, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN);

	/* AER UE Mask - Mask UR */
	UNCERRMSK_WR(i, PCIE_AER_UCE_UR);
}
695 
/*
 * Restore the ESI port error registers captured by nb_pex_init().
 */
static void
nb_pex_fini()
{
	int i = 0; /* ESI port */

	EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
	EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
	EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
	PEX_ERR_DOCMD_WR(i, docmd_pex[i]);

	/*
	 * NOTE(review): when the reset tunables are set, our masks are
	 * re-applied after the restore above, overriding the saved BIOS
	 * values — presumably deliberate (leave a known state behind);
	 * confirm before changing.
	 */
	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
}
711 
/*
 * Initialize the chipset "internal" error registers: save the BIOS
 * values for nb_int_fini(), clear any latched state by writing all
 * ones, then program which errors are polled by the driver versus
 * left to signal a machine check.
 */
void
nb_int_init()
{
	uint8_t err0_int;
	uint8_t err1_int;
	uint8_t err2_int;
	uint8_t mcerr_int;
	uint32_t emask_int;
	uint16_t stepping;

	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	/* remember BIOS values so they can be restored at unload */
	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	/* clear latched errors */
	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	/*
	 * Bits in nb5000_mask_bios_int follow the BIOS disposition:
	 * they stay unmasked in MCERR only where the BIOS masked them
	 * in one of the ERR registers.  Bits in nb5000_mask_poll_int
	 * are masked everywhere so the driver polls them instead of
	 * taking a machine check.
	 */
	mcerr_int &= ~nb5000_mask_bios_int;
	mcerr_int |= nb5000_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	mcerr_int |= nb5000_mask_poll_int;
	err0_int |= nb5000_mask_poll_int;
	err1_int |= nb5000_mask_poll_int;
	err2_int |= nb5000_mask_poll_int;

	/* remember the programmed MCERR mask for nb_mask_mc_reset() */
	l_mcerr_int = mcerr_int;
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		if (nb_chipset == INTEL_NB_7300) {
			/* 7300 stepping 0 needs a different EMASK value */
			stepping = NB5000_STEPPING();
			if (stepping == 0)
				EMASK_5000_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_5000_INT_WR(nb7300_emask_int);
		} else if (nb_chipset == INTEL_NB_5400) {
			/* preserve the 5400's reserved EMASK bits */
			EMASK_5400_INT_WR(nb5400_emask_int |
			    (emask_int & EMASK_INT_RES));
		} else {
			EMASK_5000_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}
769 
/*
 * Clear any latched internal errors, then restore the BIOS values of
 * the internal error registers saved by nb_int_init().
 */
void
nb_int_fini()
{
	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}
785 
786 void
787 nb_int_mask_mc(uint32_t mc_mask_int)
788 {
789 	uint32_t emask_int;
790 
791 	emask_int = MCERR_INT_RD();
792 	if ((emask_int & mc_mask_int) != mc_mask_int) {
793 		MCERR_INT_WR(emask_int|mc_mask_int);
794 		nb_mask_mc_set = 1;
795 	}
796 }
797 
/*
 * Initialize the FB-DIMM error registers: save the BIOS values for
 * nb_fbd_fini(), clear latched errors, then program the polled vs
 * machine-check disposition of each error, with chipset-specific
 * mask sets and a 7300 mirror-mode erratum workaround.
 */
void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;
	uint32_t emask_bios_fbd;
	uint32_t emask_poll_fbd;

	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	/* remember BIOS values so they can be restored at unload */
	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	/* clear latched errors */
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	/* select the chipset-appropriate mask sets */
	if (nb_chipset == INTEL_NB_7300 && nb_mode == NB_MEMORY_MIRROR) {
		/* MCH 7300 errata 34 */
		emask_bios_fbd = nb5000_mask_bios_fbd & ~EMASK_FBD_M23;
		emask_poll_fbd = nb5000_mask_poll_fbd;
		mcerr_fbd |= EMASK_FBD_M23;
	} else if (nb_chipset == INTEL_NB_5400) {
		emask_bios_fbd = nb5400_mask_bios_fbd;
		emask_poll_fbd = nb5400_mask_poll_fbd;
	} else {
		emask_bios_fbd = nb5000_mask_bios_fbd;
		emask_poll_fbd = nb5000_mask_poll_fbd;
	}
	/*
	 * BIOS-disposition bits follow what the BIOS set in the ERR
	 * registers; poll bits are masked so the driver polls them.
	 */
	mcerr_fbd &= ~emask_bios_fbd;
	mcerr_fbd |= emask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= emask_poll_fbd;
	err0_fbd |= emask_poll_fbd;
	err1_fbd |= emask_poll_fbd;
	err2_fbd |= emask_poll_fbd;

	/* remember the programmed MCERR mask for nb_mask_mc_reset() */
	l_mcerr_fbd = mcerr_fbd;
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd) {
		if (nb_chipset == INTEL_NB_5400)
			EMASK_FBD_WR(nb5400_emask_fbd);
		else
			EMASK_FBD_WR(nb5000_emask_fbd);
	} else {
		EMASK_FBD_WR(nb_emask_fbd);
	}
}
860 
861 void
862 nb_fbd_mask_mc(uint32_t mc_mask_fbd)
863 {
864 	uint32_t emask_fbd;
865 
866 	emask_fbd = MCERR_FBD_RD();
867 	if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
868 		MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
869 		nb_mask_mc_set = 1;
870 	}
871 }
872 
/*
 * Clear any latched FB-DIMM errors, then restore the BIOS values of
 * the FB-DIMM error registers saved by nb_fbd_init().
 */
void
nb_fbd_fini()
{
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}
888 
/*
 * Initialize the front-side-bus error registers: save the BIOS values
 * (read from FSB 0, assumed representative of all FSBs) for
 * nb_fsb_fini(), clear latched errors, and program the polled vs
 * machine-check disposition.  FSBs 0 and 1 always exist; the 7300
 * has four FSBs, so 2 and 3 are programmed identically there.
 */
static void
nb_fsb_init()
{
	uint16_t err0_fsb;
	uint16_t err1_fsb;
	uint16_t err2_fsb;
	uint16_t mcerr_fsb;
	uint16_t emask_fsb;

	err0_fsb = ERR0_FSB_RD(0);
	err1_fsb = ERR1_FSB_RD(0);
	err2_fsb = ERR2_FSB_RD(0);
	mcerr_fsb = MCERR_FSB_RD(0);
	emask_fsb = EMASK_FSB_RD(0);

	/* clear latched errors on FSBs 0 and 1 */
	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	/* remember BIOS values so they can be restored at unload */
	nb_err0_fsb = err0_fsb;
	nb_err1_fsb = err1_fsb;
	nb_err2_fsb = err2_fsb;
	nb_mcerr_fsb = mcerr_fsb;
	nb_emask_fsb = emask_fsb;

	/*
	 * BIOS-disposition bits follow what the BIOS set in the ERR
	 * registers; poll bits are masked so the driver polls them.
	 */
	mcerr_fsb &= ~nb5000_mask_bios_fsb;
	mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
	mcerr_fsb |= nb5000_mask_poll_fsb;
	err0_fsb |= nb5000_mask_poll_fsb;
	err1_fsb |= nb5000_mask_poll_fsb;
	err2_fsb |= nb5000_mask_poll_fsb;

	/* remember the programmed MCERR mask for nb_mask_mc_reset() */
	l_mcerr_fsb = mcerr_fsb;
	ERR0_FSB_WR(0, err0_fsb);
	ERR1_FSB_WR(0, err1_fsb);
	ERR2_FSB_WR(0, err2_fsb);
	MCERR_FSB_WR(0, mcerr_fsb);
	if (nb5000_reset_emask_fsb) {
		EMASK_FSB_WR(0, nb5000_emask_fsb);
	} else {
		EMASK_FSB_WR(0, nb_emask_fsb);
	}

	ERR0_FSB_WR(1, err0_fsb);
	ERR1_FSB_WR(1, err1_fsb);
	ERR2_FSB_WR(1, err2_fsb);
	MCERR_FSB_WR(1, mcerr_fsb);
	if (nb5000_reset_emask_fsb) {
		EMASK_FSB_WR(1, nb5000_emask_fsb);
	} else {
		EMASK_FSB_WR(1, nb_emask_fsb);
	}

	/* the 7300 has two additional front-side buses */
	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(2, err0_fsb);
		ERR1_FSB_WR(2, err1_fsb);
		ERR2_FSB_WR(2, err2_fsb);
		MCERR_FSB_WR(2, mcerr_fsb);
		if (nb5000_reset_emask_fsb) {
			EMASK_FSB_WR(2, nb5000_emask_fsb);
		} else {
			EMASK_FSB_WR(2, nb_emask_fsb);
		}

		ERR0_FSB_WR(3, err0_fsb);
		ERR1_FSB_WR(3, err1_fsb);
		ERR2_FSB_WR(3, err2_fsb);
		MCERR_FSB_WR(3, mcerr_fsb);
		if (nb5000_reset_emask_fsb) {
			EMASK_FSB_WR(3, nb5000_emask_fsb);
		} else {
			EMASK_FSB_WR(3, nb_emask_fsb);
		}
	}
}
984 
985 static void
986 nb_fsb_fini() {
987 	ERR0_FSB_WR(0, 0xffff);
988 	ERR1_FSB_WR(0, 0xffff);
989 	ERR2_FSB_WR(0, 0xffff);
990 	MCERR_FSB_WR(0, 0xffff);
991 	EMASK_FSB_WR(0, 0xffff);
992 
993 	ERR0_FSB_WR(0, nb_err0_fsb);
994 	ERR1_FSB_WR(0, nb_err1_fsb);
995 	ERR2_FSB_WR(0, nb_err2_fsb);
996 	MCERR_FSB_WR(0, nb_mcerr_fsb);
997 	EMASK_FSB_WR(0, nb_emask_fsb);
998 
999 	ERR0_FSB_WR(1, 0xffff);
1000 	ERR1_FSB_WR(1, 0xffff);
1001 	ERR2_FSB_WR(1, 0xffff);
1002 	MCERR_FSB_WR(1, 0xffff);
1003 	EMASK_FSB_WR(1, 0xffff);
1004 
1005 	ERR0_FSB_WR(1, nb_err0_fsb);
1006 	ERR1_FSB_WR(1, nb_err1_fsb);
1007 	ERR2_FSB_WR(1, nb_err2_fsb);
1008 	MCERR_FSB_WR(1, nb_mcerr_fsb);
1009 	EMASK_FSB_WR(1, nb_emask_fsb);
1010 
1011 	if (nb_chipset == INTEL_NB_7300) {
1012 		ERR0_FSB_WR(2, 0xffff);
1013 		ERR1_FSB_WR(2, 0xffff);
1014 		ERR2_FSB_WR(2, 0xffff);
1015 		MCERR_FSB_WR(2, 0xffff);
1016 		EMASK_FSB_WR(2, 0xffff);
1017 
1018 		ERR0_FSB_WR(2, nb_err0_fsb);
1019 		ERR1_FSB_WR(2, nb_err1_fsb);
1020 		ERR2_FSB_WR(2, nb_err2_fsb);
1021 		MCERR_FSB_WR(2, nb_mcerr_fsb);
1022 		EMASK_FSB_WR(2, nb_emask_fsb);
1023 
1024 		ERR0_FSB_WR(3, 0xffff);
1025 		ERR1_FSB_WR(3, 0xffff);
1026 		ERR2_FSB_WR(3, 0xffff);
1027 		MCERR_FSB_WR(3, 0xffff);
1028 		EMASK_FSB_WR(3, 0xffff);
1029 
1030 		ERR0_FSB_WR(3, nb_err0_fsb);
1031 		ERR1_FSB_WR(3, nb_err1_fsb);
1032 		ERR2_FSB_WR(3, nb_err2_fsb);
1033 		MCERR_FSB_WR(3, nb_mcerr_fsb);
1034 		EMASK_FSB_WR(3, nb_emask_fsb);
1035 	}
1036 }
1037 
/*
 * Ensure the given FSB error bits are masked in that bus's MCERR
 * register and note that the programmed masks have been modified.
 */
void
nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
{
	uint16_t emask_fsb;

	emask_fsb = MCERR_FSB_RD(fsb);
	if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
		/*
		 * NOTE(review): EMASK_FBD_RES (an FBD reserved-bit
		 * constant) is OR'd into an FSB register here —
		 * presumably to keep reserved bits set; confirm
		 * against the chipset datasheet.
		 */
		MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
		nb_mask_mc_set = 1;
	}
}
1049 
/*
 * Initialize the thermal error registers (5400 chipset only): save
 * the BIOS values for nb_thr_fini(), clear latched errors, then
 * program the polled vs machine-check disposition.
 */
static void
nb_thr_init()
{
	uint16_t err0_thr;
	uint16_t err1_thr;
	uint16_t err2_thr;
	uint16_t mcerr_thr;
	uint16_t emask_thr;

	if (nb_chipset == INTEL_NB_5400) {
		err0_thr = ERR0_THR_RD(0);
		err1_thr = ERR1_THR_RD(0);
		err2_thr = ERR2_THR_RD(0);
		mcerr_thr = MCERR_THR_RD(0);
		emask_thr = EMASK_THR_RD(0);

		/* clear latched errors */
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* remember BIOS values for restore at unload */
		nb_err0_thr = err0_thr;
		nb_err1_thr = err1_thr;
		nb_err2_thr = err2_thr;
		nb_mcerr_thr = mcerr_thr;
		nb_emask_thr = emask_thr;

		/*
		 * BIOS-disposition bits follow the BIOS settings in
		 * the ERR registers; poll bits are masked so the
		 * driver polls them.
		 */
		mcerr_thr &= ~nb_mask_bios_thr;
		mcerr_thr |= nb_mask_bios_thr &
		    (~err2_thr | ~err1_thr | ~err0_thr);
		mcerr_thr |= nb_mask_poll_thr;
		err0_thr |= nb_mask_poll_thr;
		err1_thr |= nb_mask_poll_thr;
		err2_thr |= nb_mask_poll_thr;

		/* remember programmed MCERR mask for nb_mask_mc_reset() */
		l_mcerr_thr = mcerr_thr;
		ERR0_THR_WR(err0_thr);
		ERR1_THR_WR(err1_thr);
		ERR2_THR_WR(err2_thr);
		MCERR_THR_WR(mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1094 
/*
 * Clear any latched thermal errors and restore the BIOS values of the
 * thermal error registers saved by nb_thr_init() (5400 only).
 */
static void
nb_thr_fini()
{
	if (nb_chipset == INTEL_NB_5400) {
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		ERR0_THR_WR(nb_err0_thr);
		ERR1_THR_WR(nb_err1_thr);
		ERR2_THR_WR(nb_err2_thr);
		MCERR_THR_WR(nb_mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1112 
1113 void
1114 nb_thr_mask_mc(uint16_t mc_mask_thr)
1115 {
1116 	uint16_t emask_thr;
1117 
1118 	emask_thr = MCERR_THR_RD(0);
1119 	if ((emask_thr & mc_mask_thr) != mc_mask_thr) {
1120 		MCERR_THR_WR(emask_thr|mc_mask_thr);
1121 		nb_mask_mc_set = 1;
1122 	}
1123 }
1124 
/*
 * Re-apply the MCERR masks most recently programmed by the *_init()
 * routines (the l_mcerr_* copies), undoing any nb_*_mask_mc() changes.
 */
void
nb_mask_mc_reset()
{
	MCERR_FBD_WR(l_mcerr_fbd);
	MCERR_INT_WR(l_mcerr_int);
	MCERR_FSB_WR(0, l_mcerr_fsb);
	MCERR_FSB_WR(1, l_mcerr_fsb);
	if (nb_chipset == INTEL_NB_7300) {
		/* the 7300 has two additional front-side buses */
		MCERR_FSB_WR(2, l_mcerr_fsb);
		MCERR_FSB_WR(3, l_mcerr_fsb);
	}
	if (nb_chipset == INTEL_NB_5400) {
		/* thermal registers exist only on the 5400 */
		MCERR_THR_WR(l_mcerr_thr);
	}
}
1140 
/*
 * One-time driver initialization: create the error queue and mutex,
 * then bring up each register group and the DIMM/address-translation
 * state in dependency order.  Returns 0 on success or EAGAIN if the
 * error queue cannot be created.
 */
int
nb_dev_init()
{
	find_dimm_label_t *label_function_p;

	/* must run before nb_dimms_init: sets nb_dimms_per_channel */
	label_function_p = find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	nb_int_init();
	nb_thr_init();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}
1165 
1166 int
1167 nb_init()
1168 {
1169 	/* return ENOTSUP if there is no PCI config space support. */
1170 	if (pci_getl_func == NULL)
1171 		return (ENOTSUP);
1172 
1173 	/* get vendor and device */
1174 	nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
1175 	switch (nb_chipset) {
1176 	default:
1177 		if (nb_5000_memory_controller == 0)
1178 			return (ENOTSUP);
1179 		break;
1180 	case INTEL_NB_7300:
1181 	case INTEL_NB_5000P:
1182 	case INTEL_NB_5000X:
1183 		break;
1184 	case INTEL_NB_5000V:
1185 	case INTEL_NB_5000Z:
1186 		nb_number_memory_controllers = 1;
1187 		break;
1188 	case INTEL_NB_5400:
1189 	case INTEL_NB_5400A:
1190 	case INTEL_NB_5400B:
1191 		nb_chipset = INTEL_NB_5400;
1192 		break;
1193 	}
1194 	return (0);
1195 }
1196 
1197 void
1198 nb_dev_reinit()
1199 {
1200 	int i, j;
1201 	int nchannels = nb_number_memory_controllers * 2;
1202 	nb_dimm_t **dimmpp;
1203 	nb_dimm_t *dimmp;
1204 	nb_dimm_t **old_nb_dimms;
1205 	int old_nb_dimms_per_channel;
1206 	find_dimm_label_t *label_function_p;
1207 	int dimm_slot = nb_dimm_slots;
1208 
1209 	old_nb_dimms = nb_dimms;
1210 	old_nb_dimms_per_channel = nb_dimms_per_channel;
1211 
1212 	dimm_fini();
1213 	nb_dimms_per_channel = 0;
1214 	label_function_p = find_dimms_per_channel();
1215 	dimm_init();
1216 	nb_dimms_init(label_function_p);
1217 	nb_mc_init();
1218 	nb_pex_init();
1219 	nb_int_init();
1220 	nb_thr_init();
1221 	nb_fbd_init();
1222 	nb_fsb_init();
1223 	nb_scrubber_enable();
1224 
1225 	dimmpp = old_nb_dimms;
1226 	for (i = 0; i < nchannels; i++) {
1227 		for (j = 0; j < old_nb_dimms_per_channel; j++) {
1228 			dimmp = *dimmpp;
1229 			if (dimmp) {
1230 				kmem_free(dimmp, sizeof (nb_dimm_t));
1231 				*dimmpp = NULL;
1232 			}
1233 			dimmp++;
1234 		}
1235 	}
1236 	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) * dimm_slot);
1237 }
1238 
/*
 * Tear down everything nb_dev_init() set up, restoring the BIOS
 * register state and freeing the error queue, mutex and DIMM table.
 */
void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_thr_fini();
	nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}
1252 
/*
 * Module unload hook; all real teardown happens in nb_dev_unload(),
 * so there is nothing left to do here.
 */
void
nb_unload()
{
}
1257