xref: /illumos-gate/usr/src/uts/intel/io/intel_nb5000/nb5000_init.c (revision 129b3e6c5b0ac55b5021a4c38db6387b6acdaaf1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include <sys/pci.h>
44 #include <sys/pcie.h>
45 #include "nb5000.h"
46 #include "nb_log.h"
47 #include "dimm_phys.h"
48 #include "rank.h"
49 
/* Tunable: enable the memory controller's hardware patrol/demand scrubber. */
int nb_hw_memory_scrub_enable = 1;
/* Nonzero once the software memory scrubber has been disabled. */
static int nb_sw_scrub_disabled = 0;

/* Tunable: force-attach on unrecognized northbridge device IDs. */
int nb_5000_memory_controller = 0;
int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
int nb_dimms_per_channel = 0;

nb_dimm_t **nb_dimms;		/* per-slot DIMM info, NULL for empty slots */
int nb_ndimm;			/* count of populated DIMM slots */
uint32_t nb_chipset;		/* northbridge device ID (normalized) */
enum nb_memory_mode nb_mode;	/* single-channel / mirror / normal / spare */
bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
uint32_t top_of_low_memory;	/* base of the PCI hole below 4G */
uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];

extern int nb_no_smbios;	/* tunable: skip SMBIOS-based DIMM labeling */

errorq_t *nb_queue;		/* error logout queue drained by nb_drain */
kmutex_t nb_mutex;

static int nb_dimm_slots;	/* total slots: controllers * 2 * per-channel */

/* Saved BIOS values of the internal (INT) error registers, for _fini. */
static uint32_t nb_err0_int;
static uint32_t nb_err1_int;
static uint32_t nb_err2_int;
static uint32_t nb_mcerr_int;
static uint32_t nb_emask_int;

/* Saved BIOS values of the FBDIMM (FBD) error registers. */
static uint32_t nb_err0_fbd;
static uint32_t nb_err1_fbd;
static uint32_t nb_err2_fbd;
static uint32_t nb_mcerr_fbd;
static uint32_t nb_emask_fbd;

/* Saved BIOS values of the front-side bus (FSB) error registers. */
static uint16_t nb_err0_fsb;
static uint16_t nb_err1_fsb;
static uint16_t nb_err2_fsb;
static uint16_t nb_mcerr_fsb;
static uint16_t nb_emask_fsb;

/* Saved BIOS values of the 5400 thermal (THR) error registers. */
static uint16_t nb_err0_thr;
static uint16_t nb_err1_thr;
static uint16_t nb_err2_thr;
static uint16_t nb_mcerr_thr;
static uint16_t nb_emask_thr;

/* Saved BIOS values of the per-port PCI Express error registers. */
static uint32_t	emask_uncor_pex[NB_PCI_DEV];
static uint32_t emask_cor_pex[NB_PCI_DEV];
static uint32_t emask_rp_pex[NB_PCI_DEV];
static uint32_t docmd_pex[NB_PCI_DEV];
static uint32_t uncerrsev[NB_PCI_DEV];

/* MCERR values we last programmed, restored by nb_mask_mc_reset(). */
static uint32_t l_mcerr_int;
static uint32_t l_mcerr_fbd;
static uint16_t l_mcerr_fsb;
static uint16_t l_mcerr_thr;

/* Tunables controlling how the FBD error masks are (re)programmed. */
uint_t nb5000_emask_fbd = EMASK_5000_FBD_RES;
uint_t nb5400_emask_fbd = 0;
int nb5000_reset_emask_fbd = 1;
uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;
uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;

/* Tunables controlling how the FSB error masks are (re)programmed. */
uint_t nb5000_emask_fsb = 0;
int nb5000_reset_emask_fsb = 1;
uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;

uint_t nb5400_emask_int = EMASK_INT_5400;

/* Tunables controlling how the internal error masks are (re)programmed. */
uint_t nb7300_emask_int = EMASK_INT_7300;
uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
uint_t nb5000_emask_int = EMASK_INT_5000;
int nb5000_reset_emask_int = 1;
uint_t nb5000_mask_poll_int = EMASK_INT_NF;
uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;

/* Tunables controlling how the thermal error masks are (re)programmed. */
uint_t nb_mask_poll_thr = EMASK_THR_NF;
uint_t nb_mask_bios_thr = EMASK_THR_FATAL;

/* Tunables controlling how the PCI Express error masks are (re)programmed. */
int nb5000_reset_uncor_pex = 0;
uint_t nb5000_mask_uncor_pex = 0;
int nb5000_reset_cor_pex = 0;
uint_t nb5000_mask_cor_pex = 0xffffffff;
uint32_t nb5000_rp_pex = 0x1;

/* Set when a machine-check handler has taken over an error mask. */
int nb_mask_mc_set;

/* Platform-specific hook used to generate human-readable DIMM labels. */
typedef struct find_dimm_label {
	void (*label_function)(int, char *, int);
} find_dimm_label_t;

static void x8450_dimm_label(int, char *, int);

/*
 * Table matching SMBIOS system vendor/product prefixes to a DIMM
 * labeling function and the platform's physical DIMMs per channel.
 */
static struct platform_label {
	const char *sys_vendor;		/* SMB_TYPE_SYSTEM vendor prefix */
	const char *sys_product;	/* SMB_TYPE_SYSTEM product prefix */
	find_dimm_label_t dimm_label;
	int dimms_per_channel;
} platform_label[] = {
	{ "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
	    x8450_dimm_label, 8 },
	{ NULL, NULL, NULL, 0 }
};
157 
/*
 * Read the SPD status register for the given logical SMBus.
 * Buses are numbered two per branch: even = channel 0, odd = channel 1.
 */
static unsigned short
read_spd(int bus)
{
	int br = bus >> 1;	/* branch index */
	int ch = bus & 1;	/* channel within the branch */

	return (SPD_RD(br, ch));
}
169 
170 static void
171 write_spdcmd(int bus, uint32_t val)
172 {
173 	int branch = bus >> 1;
174 	int channel = bus & 1;
175 	SPDCMD_WR(branch, channel, val);
176 }
177 
/*
 * Read one byte from a DIMM's SPD EEPROM via the memory controller's
 * SMBus interface.  Returns the byte (0-255) or -1 on timeout/failure.
 * Retries the whole transaction up to 4 times on bus error.
 */
static int
read_spd_eeprom(int bus, int slave, int addr)
{
	int retry = 4;
	int wait;
	int spd;
	uint32_t cmd;

	for (;;) {
		/* Wait for the SPD interface to go idle before issuing. */
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0)
				return (-1);
			drv_usecwait(10);
		}
		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
		write_spdcmd(bus, cmd);
		/* Wait for the command we just issued to complete. */
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0) {
				/* force the retry path below */
				spd = SPD_BUS_ERROR;
				break;
			}
			drv_usecwait(10);
		}
		/* Busy has cleared; now poll until the read data is valid. */
		while ((spd & SPD_BUS_ERROR) == 0 &&
		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
		    SPD_READ_DATA_VALID) {
			spd = read_spd(bus);
			if (--wait == 0)
				return (-1);
		}
		if ((spd & SPD_BUS_ERROR) == 0)
			break;
		if (--retry == 0)
			return (-1);
	}
	/* Low byte of the status register holds the EEPROM data. */
	return (spd & 0xff);
}
223 
224 static void
225 nb_fini()
226 {
227 	int i, j;
228 	int nchannels = nb_number_memory_controllers * 2;
229 	nb_dimm_t **dimmpp;
230 	nb_dimm_t *dimmp;
231 
232 	dimmpp = nb_dimms;
233 	for (i = 0; i < nchannels; i++) {
234 		for (j = 0; j < nb_dimms_per_channel; j++) {
235 			dimmp = *dimmpp;
236 			if (dimmp) {
237 				kmem_free(dimmp, sizeof (nb_dimm_t));
238 				*dimmpp = NULL;
239 			}
240 			dimmp++;
241 		}
242 	}
243 	kmem_free(nb_dimms, sizeof (nb_dimm_t *) * nb_dimm_slots);
244 	nb_dimms = NULL;
245 	dimm_fini();
246 }
247 
248 void
249 nb_scrubber_enable()
250 {
251 	uint32_t mc;
252 
253 	if (!nb_hw_memory_scrub_enable)
254 		return;
255 
256 	mc = MC_RD();
257 	if ((mc & MC_MIRROR) != 0) /* mirror mode */
258 		mc |= MC_PATROL_SCRUB;
259 	else
260 		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
261 	MC_WR(mc);
262 
263 	if (nb_sw_scrub_disabled++)
264 		cmi_mc_sw_memscrub_disable();
265 }
266 
/*
 * Build an nb_dimm_t for the DIMM at (channel, dimm) by combining the
 * MTR register contents with identity data read from the SPD EEPROM.
 * Returns NULL if the slot is empty or the DIMM is not FBDIMM (SPD
 * memory type 9).  Caller frees the result with kmem_free().
 */
static nb_dimm_t *
nb_dimm_init(int channel, int dimm, uint16_t mtr)
{
	nb_dimm_t *dp;
	int i, t;
	int spd_sz;

	if (MTR_PRESENT(mtr) == 0)
		return (NULL);
	/* SPD byte 2 = memory type; 9 means FBDIMM */
	t = read_spd_eeprom(channel, dimm, 2) & 0xf;

	if (t != 9)
		return (NULL);

	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);

	/* SPD byte 0 encodes how much of the EEPROM is written */
	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
	if (t == 1)
		spd_sz = 128;
	else if (t == 2)
		spd_sz = 176;
	else
		spd_sz = 256;
	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
	    (read_spd_eeprom(channel, dimm, 118) << 8);
	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
	dp->serial_number =
	    (read_spd_eeprom(channel, dimm, 122) << 24) |
	    (read_spd_eeprom(channel, dimm, 123) << 16) |
	    (read_spd_eeprom(channel, dimm, 124) << 8) |
	    read_spd_eeprom(channel, dimm, 125);
	/* SPD byte 121: manufacture week, BCD-encoded */
	t = read_spd_eeprom(channel, dimm, 121);
	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
	/* Part number and revision only exist past the first 128 bytes. */
	if (spd_sz > 128) {
		for (i = 0; i < sizeof (dp->part_number); i++) {
			dp->part_number[i] =
			    read_spd_eeprom(channel, dimm, 128 + i);
		}
		for (i = 0; i < sizeof (dp->revision); i++) {
			dp->revision[i] =
			    read_spd_eeprom(channel, dimm, 146 + i);
		}
	}
	/* Geometry comes from the controller's MTR register, not SPD. */
	dp->mtr_present = MTR_PRESENT(mtr);
	dp->nranks = MTR_NUMRANK(mtr);
	dp->nbanks = MTR_NUMBANK(mtr);
	dp->ncolumn = MTR_NUMCOL(mtr);
	dp->nrow = MTR_NUMROW(mtr);
	dp->width = MTR_WIDTH(mtr);
	dp->dimm_size = MTR_DIMMSIZE(mtr);

	return (dp);
}
321 
322 static uint64_t
323 mc_range(int controller, uint64_t base)
324 {
325 	int i;
326 	uint64_t limit = 0;
327 
328 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
329 		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
330 		    base < nb_banks[i].limit) {
331 			limit = nb_banks[i].limit;
332 			if (base <= top_of_low_memory &&
333 			    limit > top_of_low_memory) {
334 				limit -= TLOW_MAX - top_of_low_memory;
335 			}
336 			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
337 			    nb_mode != NB_MEMORY_MIRROR) {
338 				limit = limit / 2;
339 			}
340 		}
341 	}
342 	return (limit);
343 }
344 
345 void
346 nb_mc_init()
347 {
348 	uint16_t tolm;
349 	uint16_t mir;
350 	uint32_t hole_base;
351 	uint32_t hole_size;
352 	uint32_t dmir;
353 	uint64_t base;
354 	uint64_t limit;
355 	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
356 	int i, j, k;
357 	uint8_t interleave;
358 
359 	base = 0;
360 	tolm = TOLM_RD();
361 	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
362 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
363 		mir = MIR_RD(i);
364 		limit = (uint64_t)(mir >> 4) << 28;
365 		way0 = mir & 1;
366 		way1 = (mir >> 1) & 1;
367 		if (way0 == 0 && way1 == 0) {
368 			way0 = 1;
369 			way1 = 1;
370 		}
371 		if (limit > top_of_low_memory)
372 			limit += TLOW_MAX - top_of_low_memory;
373 		nb_banks[i].base = base;
374 		nb_banks[i].limit = limit;
375 		nb_banks[i].way[0] = way0;
376 		nb_banks[i].way[1] = way1;
377 		base = limit;
378 	}
379 	for (i = 0; i < nb_number_memory_controllers; i++) {
380 		base = 0;
381 
382 		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
383 			dmir = DMIR_RD(i, j);
384 			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
385 			if (limit == 0) {
386 				limit = mc_range(i, base);
387 			}
388 			branch_interleave = 0;
389 			hole_base = 0;
390 			hole_size = 0;
391 			DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
392 			if (rank0 == rank1)
393 				interleave = 1;
394 			else if (rank0 == rank2)
395 				interleave = 2;
396 			else
397 				interleave = 4;
398 			if (nb_mode != NB_MEMORY_MIRROR &&
399 			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
400 				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
401 					if (base >= nb_banks[k].base &&
402 					    base < nb_banks[k].limit) {
403 						if (nb_banks[i].way[0] &&
404 						    nb_banks[i].way[1]) {
405 							interleave *= 2;
406 							limit *= 2;
407 							branch_interleave = 1;
408 						}
409 						break;
410 					}
411 				}
412 			}
413 			if (base < top_of_low_memory &&
414 			    limit > top_of_low_memory) {
415 				hole_base = top_of_low_memory;
416 				hole_size = TLOW_MAX - top_of_low_memory;
417 				limit += hole_size;
418 			} else if (base > top_of_low_memory) {
419 				limit += TLOW_MAX - top_of_low_memory;
420 			}
421 			nb_ranks[i][j].base = base;
422 			nb_ranks[i][j].limit = limit;
423 			nb_ranks[i][j].rank[0] = rank0;
424 			nb_ranks[i][j].rank[1] = rank1;
425 			nb_ranks[i][j].rank[2] = rank2;
426 			nb_ranks[i][j].rank[3] = rank3;
427 			nb_ranks[i][j].interleave = interleave;
428 			nb_ranks[i][j].branch_interleave = branch_interleave;
429 			nb_ranks[i][j].hole_base = hole_base;
430 			nb_ranks[i][j].hole_size = hole_size;
431 			if (limit > base) {
432 				dimm_add_rank(i, rank0, branch_interleave, 0,
433 				    base, hole_base, hole_size, interleave,
434 				    limit);
435 				if (rank0 != rank1) {
436 					dimm_add_rank(i, rank1,
437 					    branch_interleave, 1, base,
438 					    hole_base, hole_size, interleave,
439 					    limit);
440 					if (rank0 != rank2) {
441 						dimm_add_rank(i, rank2,
442 						    branch_interleave, 2, base,
443 						    hole_base, hole_size,
444 						    interleave, limit);
445 						dimm_add_rank(i, rank3,
446 						    branch_interleave, 3, base,
447 						    hole_base, hole_size,
448 						    interleave, limit);
449 					}
450 				}
451 			}
452 			base = limit;
453 		}
454 	}
455 }
456 
457 void
458 nb_used_spare_rank(int branch, int bad_rank)
459 {
460 	int i;
461 	int j;
462 
463 	for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
464 		for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
465 			if (nb_ranks[branch][i].rank[j] == bad_rank) {
466 				nb_ranks[branch][i].rank[j] =
467 				    spare_rank[branch];
468 				i = NB_MEM_RANK_SELECT;
469 				break;
470 			}
471 		}
472 	}
473 }
474 
/*
 * Determine nb_dimms_per_channel, preferring a known-platform entry in
 * platform_label (matched by SMBIOS vendor/product prefix); otherwise
 * probe the MTR registers for the highest populated slot.  Returns the
 * platform's DIMM-label function, or NULL if the platform is unknown.
 */
find_dimm_label_t *
find_dimms_per_channel()
{
	struct platform_label *pl;
	smbios_info_t si;
	smbios_system_t sy;
	id_t id;
	int i, j;
	uint16_t mtr;
	find_dimm_label_t *rt = NULL;

	if (ksmbios != NULL && nb_no_smbios == 0) {
		if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
		    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
			/* prefix match against the known-platform table */
			for (pl = platform_label; pl->sys_vendor; pl++) {
				if (strncmp(pl->sys_vendor,
				    si.smbi_manufacturer,
				    strlen(pl->sys_vendor)) == 0 &&
				    strncmp(pl->sys_product, si.smbi_product,
				    strlen(pl->sys_product)) == 0) {
					nb_dimms_per_channel =
					    pl->dimms_per_channel;
					rt = &pl->dimm_label;
					break;
				}
			}
		}
	}
	if (nb_dimms_per_channel == 0) {
		/*
		 * Scan all memory channels if we find a channel which has more
		 * dimms then we have seen before set nb_dimms_per_channel to
		 * the number of dimms on the channel
		 */
		for (i = 0; i < nb_number_memory_controllers; i++) {
			for (j = nb_dimms_per_channel;
			    j < NB_MAX_DIMMS_PER_CHANNEL; j++) {
				mtr = MTR_RD(i, j);
				if (MTR_PRESENT(mtr))
					nb_dimms_per_channel = j + 1;
			}
		}
	}
	return (rt);
}
520 
/* Accumulator shared by the SMBIOS iteration callbacks below. */
struct smb_dimm_rec {
	int dimms;		/* populated slots found by this driver */
	int slots;		/* memory-device records seen in SMBIOS */
	int populated;		/* SMBIOS records with nonzero size */
	nb_dimm_t **dimmpp;	/* cursor into the nb_dimms slot array */
};
527 
/*
 * smbios_iter() callback: walk SMBIOS memory-device records in parallel
 * with the nb_dimms slot array, copying each record's device locator
 * string into the matching nb_dimm_t's label.  Returns -1 to stop the
 * iteration once the slot array is exhausted.
 */
static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
	nb_dimm_t ***dimmpp;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	dimmpp = &rp->dimmpp;
	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		if (*dimmpp >= &nb_dimms[nb_dimm_slots])
			return (-1);
		dimmp = **dimmpp;
		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0 &&
		    md.smbmd_dloc != NULL) {
			if (md.smbmd_size) {
				/*
				 * SMBIOS says this slot is populated but we
				 * found no DIMM here; when the counts agree
				 * overall, just skip this slot.
				 */
				if (dimmp == NULL &&
				    (rp->slots == nb_dimm_slots ||
				    rp->dimms < rp->populated)) {
					(*dimmpp)++;
					return (0);
				}
				/*
				 * if there is no physical dimm for this smbios
				 * record it is because this system has less
				 * physical slots than the controller supports
				 * so skip empty slots to find the slot this
				 * smbios record belongs too
				 */
				while (dimmp == NULL) {
					(*dimmpp)++;
					if (*dimmpp >= &nb_dimms[nb_dimm_slots])
						return (-1);
					dimmp = **dimmpp;
				}
				(void) snprintf(dimmp->label,
				    sizeof (dimmp->label), "%s", md.smbmd_dloc);
				(*dimmpp)++;
			}
		}
	}
	return (0);
}
571 
572 static int
573 check_memdevice(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
574 {
575 	struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
576 	smbios_memdevice_t md;
577 
578 	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
579 		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0) {
580 			rp->slots++;
581 			if (md.smbmd_size) {
582 				rp->populated++;
583 			}
584 		}
585 	}
586 	return (0);
587 }
588 
589 void
590 nb_smbios()
591 {
592 	struct smb_dimm_rec r;
593 	int i;
594 
595 	if (ksmbios != NULL && nb_no_smbios == 0) {
596 		r.dimms = 0;
597 		r.slots = 0;
598 		r.populated = 0;
599 		r.dimmpp = nb_dimms;
600 		for (i = 0; i < nb_dimm_slots; i++) {
601 			if (nb_dimms[i] != NULL)
602 				r.dimms++;
603 		}
604 		(void) smbios_iter(ksmbios, check_memdevice, &r);
605 		(void) smbios_iter(ksmbios, dimm_label, &r);
606 	}
607 }
608 
/*
 * Sun Blade X8450 DIMM labeler: convert a logical slot index (8 slots
 * per channel) into the chassis "D<n>" silkscreen name, where slots on
 * the same channel are numbered four apart.
 */
static void
x8450_dimm_label(int dimm, char *label, int label_sz)
{
	int channel = dimm >> 3;
	int slot = dimm & 0x7;

	(void) snprintf(label, label_sz, "D%d", slot * 4 + channel);
}
617 
/*
 * Discover all DIMMs: determine the memory mode, allocate the nb_dimms
 * slot array, probe every (controller, slot) pair on both channels, and
 * label DIMMs either via the platform hook or via SMBIOS.
 */
static void
nb_dimms_init(find_dimm_label_t *label_function)
{
	int i, j, k, l;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	/* two channels per controller */
	nb_dimm_slots = nb_number_memory_controllers * 2 * nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		if (nb_mode == NB_MEMORY_NORMAL) {
			/* check whether a spare rank is already deployed */
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			mtr = MTR_RD(i, j);
			k = i * 2;	/* bus number of channel 0 */
			dimmpp[j] = nb_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm ++;
				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
				    dimmpp[j]->width, dimmpp[j]->ncolumn,
				    dimmpp[j]->nrow);
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
			}
			/* second channel of this branch shares the MTR */
			dimmpp[j + nb_dimms_per_channel] =
			    nb_dimm_init(k + 1, j, mtr);
			l = j + nb_dimms_per_channel;
			if (dimmpp[l]) {
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + l,
					    dimmpp[l]->label,
					    sizeof (dimmpp[l]->label));
				}
				nb_ndimm ++;
			}
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
	/* fall back to SMBIOS labels on unrecognized platforms */
	if (label_function == NULL)
		nb_smbios();
}
683 
684 
/* Setup the ESI port registers to enable SERR for southbridge */
static void
nb_pex_init()
{
	int i = 0; /* ESI port */
	uint16_t regw;

	/* Save the BIOS-programmed values so nb_pex_fini can restore them. */
	emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
	emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
	emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
	docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
	uncerrsev[i] = UNCERRSEV_RD(i);

	/* Optionally override the BIOS error masks (tunable-controlled). */
	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
	if (nb_chipset == INTEL_NB_5400) {
		/* disable masking of ERR pins used by DOCMD */
		PEX_ERR_PIN_MASK_WR(i, 0x10);
	}

	/* RP error message (CE/NFE/FE) detect mask */
	EMASK_RP_PEX_WR(i, nb5000_rp_pex);

	/* Command Register - Enable SERR */
	regw = nb_pci_getw(0, i, 0, PCI_CONF_COMM, 0);
	nb_pci_putw(0, i, 0, PCI_CONF_COMM,
	    regw | PCI_COMM_SERR_ENABLE);

	/* Root Control Register - SERR on NFE/FE */
	PEXROOTCTL_WR(i, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN);

	/* AER UE Mask - Mask UR */
	UNCERRMSK_WR(i, PCIE_AER_UCE_UR);
}
722 
/*
 * Restore the ESI port's PCI Express error registers to the values
 * saved by nb_pex_init(), then re-apply any tunable-forced masks.
 */
static void
nb_pex_fini()
{
	int i = 0; /* ESI port */

	EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
	EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
	EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
	PEX_ERR_DOCMD_WR(i, docmd_pex[i]);

	/* tunable overrides win over the restored BIOS values */
	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
}
738 
/*
 * Program the northbridge internal (INT) error registers: save the BIOS
 * values, mask everything, then arm the masks so that non-fatal errors
 * are left for polling and fatal errors raise a machine check unless
 * BIOS had already claimed them.
 */
void
nb_int_init()
{
	uint32_t err0_int;
	uint32_t err1_int;
	uint32_t err2_int;
	uint32_t mcerr_int;
	uint32_t emask_int;
	uint16_t stepping;

	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	/* remember BIOS settings for nb_int_fini() */
	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	/* quiesce everything while we reprogram */
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	/*
	 * Fatal errors machine-check unless BIOS left them unmasked in an
	 * ERR register (BIOS owns those); non-fatal errors are masked from
	 * MCERR and the ERR registers so they are picked up by polling.
	 */
	mcerr_int &= ~nb5000_mask_bios_int;
	mcerr_int |= nb5000_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	mcerr_int |= nb5000_mask_poll_int;
	err0_int |= nb5000_mask_poll_int;
	err1_int |= nb5000_mask_poll_int;
	err2_int |= nb5000_mask_poll_int;

	l_mcerr_int = mcerr_int;
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		if (nb_chipset == INTEL_NB_7300) {
			/* 7300 stepping 0 needs a different mask */
			stepping = NB5000_STEPPING();
			if (stepping == 0)
				EMASK_5000_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_5000_INT_WR(nb7300_emask_int);
		} else if (nb_chipset == INTEL_NB_5400) {
			/* preserve the 5400's reserved emask bits */
			EMASK_5400_INT_WR(nb5400_emask_int |
			    (emask_int & EMASK_INT_RES));
		} else {
			EMASK_5000_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}
796 
/*
 * Restore the internal (INT) error registers to the BIOS values saved
 * by nb_int_init(), masking everything first to avoid spurious events
 * during the transition.
 */
void
nb_int_fini()
{
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}
812 
813 void
814 nb_int_mask_mc(uint32_t mc_mask_int)
815 {
816 	uint32_t emask_int;
817 
818 	emask_int = MCERR_INT_RD();
819 	if ((emask_int & mc_mask_int) != mc_mask_int) {
820 		MCERR_INT_WR(emask_int|mc_mask_int);
821 		nb_mask_mc_set = 1;
822 	}
823 }
824 
/*
 * Program the FBDIMM (FBD) error registers: save BIOS values, mask all,
 * then split errors between machine check (fatal, unless BIOS claimed
 * them) and polling (non-fatal), with chipset-specific mask choices.
 */
void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;
	uint32_t emask_bios_fbd;
	uint32_t emask_poll_fbd;

	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	/* remember BIOS settings for nb_fbd_fini() */
	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	/* quiesce everything while we reprogram */
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	if (nb_chipset == INTEL_NB_7300 && nb_mode == NB_MEMORY_MIRROR) {
		/* MCH 7300 errata 34 */
		emask_bios_fbd = nb5000_mask_bios_fbd & ~EMASK_FBD_M23;
		emask_poll_fbd = nb5000_mask_poll_fbd;
		mcerr_fbd |= EMASK_FBD_M23;
	} else if (nb_chipset == INTEL_NB_5400) {
		emask_bios_fbd = nb5400_mask_bios_fbd;
		emask_poll_fbd = nb5400_mask_poll_fbd;
	} else {
		emask_bios_fbd = nb5000_mask_bios_fbd;
		emask_poll_fbd = nb5000_mask_poll_fbd;
	}
	/* fatal -> machine check unless BIOS owns it; non-fatal -> polling */
	mcerr_fbd &= ~emask_bios_fbd;
	mcerr_fbd |= emask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= emask_poll_fbd;
	err0_fbd |= emask_poll_fbd;
	err1_fbd |= emask_poll_fbd;
	err2_fbd |= emask_poll_fbd;

	l_mcerr_fbd = mcerr_fbd;
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd) {
		if (nb_chipset == INTEL_NB_5400)
			EMASK_FBD_WR(nb5400_emask_fbd);
		else
			EMASK_FBD_WR(nb5000_emask_fbd);
	} else {
		EMASK_FBD_WR(nb_emask_fbd);
	}
}
887 
888 void
889 nb_fbd_mask_mc(uint32_t mc_mask_fbd)
890 {
891 	uint32_t emask_fbd;
892 
893 	emask_fbd = MCERR_FBD_RD();
894 	if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
895 		MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
896 		nb_mask_mc_set = 1;
897 	}
898 }
899 
/*
 * Restore the FBD error registers to the BIOS values saved by
 * nb_fbd_init(), masking everything first during the transition.
 */
void
nb_fbd_fini()
{
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}
915 
/*
 * Program the front-side bus (FSB) error registers for all FSBs (two on
 * 5000/5400, four on 7300).  BIOS values are sampled from FSB 0 only
 * and the same computed masks are applied to every FSB.
 */
static void
nb_fsb_init()
{
	uint16_t err0_fsb;
	uint16_t err1_fsb;
	uint16_t err2_fsb;
	uint16_t mcerr_fsb;
	uint16_t emask_fsb;

	err0_fsb = ERR0_FSB_RD(0);
	err1_fsb = ERR1_FSB_RD(0);
	err2_fsb = ERR2_FSB_RD(0);
	mcerr_fsb = MCERR_FSB_RD(0);
	emask_fsb = EMASK_FSB_RD(0);

	/* quiesce FSB 0 and 1 while we reprogram */
	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	/* remember BIOS settings (FSB 0) for nb_fsb_fini() */
	nb_err0_fsb = err0_fsb;
	nb_err1_fsb = err1_fsb;
	nb_err2_fsb = err2_fsb;
	nb_mcerr_fsb = mcerr_fsb;
	nb_emask_fsb = emask_fsb;

	/* fatal -> machine check unless BIOS owns it; non-fatal -> polling */
	mcerr_fsb &= ~nb5000_mask_bios_fsb;
	mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
	mcerr_fsb |= nb5000_mask_poll_fsb;
	err0_fsb |= nb5000_mask_poll_fsb;
	err1_fsb |= nb5000_mask_poll_fsb;
	err2_fsb |= nb5000_mask_poll_fsb;

	l_mcerr_fsb = mcerr_fsb;
	ERR0_FSB_WR(0, err0_fsb);
	ERR1_FSB_WR(0, err1_fsb);
	ERR2_FSB_WR(0, err2_fsb);
	MCERR_FSB_WR(0, mcerr_fsb);
	if (nb5000_reset_emask_fsb) {
		EMASK_FSB_WR(0, nb5000_emask_fsb);
	} else {
		EMASK_FSB_WR(0, nb_emask_fsb);
	}

	ERR0_FSB_WR(1, err0_fsb);
	ERR1_FSB_WR(1, err1_fsb);
	ERR2_FSB_WR(1, err2_fsb);
	MCERR_FSB_WR(1, mcerr_fsb);
	if (nb5000_reset_emask_fsb) {
		EMASK_FSB_WR(1, nb5000_emask_fsb);
	} else {
		EMASK_FSB_WR(1, nb_emask_fsb);
	}

	/* the 7300 has four FSBs; repeat for FSB 2 and 3 */
	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(2, err0_fsb);
		ERR1_FSB_WR(2, err1_fsb);
		ERR2_FSB_WR(2, err2_fsb);
		MCERR_FSB_WR(2, mcerr_fsb);
		if (nb5000_reset_emask_fsb) {
			EMASK_FSB_WR(2, nb5000_emask_fsb);
		} else {
			EMASK_FSB_WR(2, nb_emask_fsb);
		}

		ERR0_FSB_WR(3, err0_fsb);
		ERR1_FSB_WR(3, err1_fsb);
		ERR2_FSB_WR(3, err2_fsb);
		MCERR_FSB_WR(3, mcerr_fsb);
		if (nb5000_reset_emask_fsb) {
			EMASK_FSB_WR(3, nb5000_emask_fsb);
		} else {
			EMASK_FSB_WR(3, nb_emask_fsb);
		}
	}
}
1011 
/*
 * Restore all FSB error registers to the BIOS values (sampled from
 * FSB 0) saved by nb_fsb_init(), masking each FSB during transition.
 */
static void
nb_fsb_fini() {
	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(0, nb_err0_fsb);
	ERR1_FSB_WR(0, nb_err1_fsb);
	ERR2_FSB_WR(0, nb_err2_fsb);
	MCERR_FSB_WR(0, nb_mcerr_fsb);
	EMASK_FSB_WR(0, nb_emask_fsb);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	ERR0_FSB_WR(1, nb_err0_fsb);
	ERR1_FSB_WR(1, nb_err1_fsb);
	ERR2_FSB_WR(1, nb_err2_fsb);
	MCERR_FSB_WR(1, nb_mcerr_fsb);
	EMASK_FSB_WR(1, nb_emask_fsb);

	/* the 7300 has four FSBs; restore FSB 2 and 3 as well */
	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(2, nb_err0_fsb);
		ERR1_FSB_WR(2, nb_err1_fsb);
		ERR2_FSB_WR(2, nb_err2_fsb);
		MCERR_FSB_WR(2, nb_mcerr_fsb);
		EMASK_FSB_WR(2, nb_emask_fsb);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(3, nb_err0_fsb);
		ERR1_FSB_WR(3, nb_err1_fsb);
		ERR2_FSB_WR(3, nb_err2_fsb);
		MCERR_FSB_WR(3, nb_mcerr_fsb);
		EMASK_FSB_WR(3, nb_emask_fsb);
	}
}
1064 
/*
 * Mask the given FSB error bits from generating machine checks on the
 * specified FSB; nb_mask_mc_reset() restores the original mask.
 */
void
nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
{
	uint16_t emask_fsb;

	emask_fsb = MCERR_FSB_RD(fsb);
	if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
		/*
		 * NOTE(review): EMASK_FBD_RES is an FBD-register constant
		 * being OR'd into an FSB register here — confirm against
		 * the chipset datasheet that this is intentional.
		 */
		MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
		nb_mask_mc_set = 1;
	}
}
1076 
/*
 * Program the thermal (THR) error registers, which exist only on the
 * 5400 chipset: save BIOS values, mask all, then split fatal errors to
 * machine check and non-fatal errors to polling.
 */
static void
nb_thr_init()
{
	uint16_t err0_thr;
	uint16_t err1_thr;
	uint16_t err2_thr;
	uint16_t mcerr_thr;
	uint16_t emask_thr;

	if (nb_chipset == INTEL_NB_5400) {
		err0_thr = ERR0_THR_RD(0);
		err1_thr = ERR1_THR_RD(0);
		err2_thr = ERR2_THR_RD(0);
		mcerr_thr = MCERR_THR_RD(0);
		emask_thr = EMASK_THR_RD(0);

		/* quiesce everything while we reprogram */
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* remember BIOS settings for nb_thr_fini() */
		nb_err0_thr = err0_thr;
		nb_err1_thr = err1_thr;
		nb_err2_thr = err2_thr;
		nb_mcerr_thr = mcerr_thr;
		nb_emask_thr = emask_thr;

		/* fatal -> machine check unless BIOS owns it; NF -> poll */
		mcerr_thr &= ~nb_mask_bios_thr;
		mcerr_thr |= nb_mask_bios_thr &
		    (~err2_thr | ~err1_thr | ~err0_thr);
		mcerr_thr |= nb_mask_poll_thr;
		err0_thr |= nb_mask_poll_thr;
		err1_thr |= nb_mask_poll_thr;
		err2_thr |= nb_mask_poll_thr;

		l_mcerr_thr = mcerr_thr;
		ERR0_THR_WR(err0_thr);
		ERR1_THR_WR(err1_thr);
		ERR2_THR_WR(err2_thr);
		MCERR_THR_WR(mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1121 
/*
 * Restore the 5400 thermal error registers to the BIOS values saved by
 * nb_thr_init(); a no-op on other chipsets.
 */
static void
nb_thr_fini()
{
	if (nb_chipset == INTEL_NB_5400) {
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		ERR0_THR_WR(nb_err0_thr);
		ERR1_THR_WR(nb_err1_thr);
		ERR2_THR_WR(nb_err2_thr);
		MCERR_THR_WR(nb_mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1139 
1140 void
1141 nb_thr_mask_mc(uint16_t mc_mask_thr)
1142 {
1143 	uint16_t emask_thr;
1144 
1145 	emask_thr = MCERR_THR_RD(0);
1146 	if ((emask_thr & mc_mask_thr) != mc_mask_thr) {
1147 		MCERR_THR_WR(emask_thr|mc_mask_thr);
1148 		nb_mask_mc_set = 1;
1149 	}
1150 }
1151 
/*
 * Re-apply the MCERR masks this driver last programmed (l_mcerr_*),
 * undoing any temporary masking done by the nb_*_mask_mc() routines.
 */
void
nb_mask_mc_reset()
{
	MCERR_FBD_WR(l_mcerr_fbd);
	MCERR_INT_WR(l_mcerr_int);
	MCERR_FSB_WR(0, l_mcerr_fsb);
	MCERR_FSB_WR(1, l_mcerr_fsb);
	if (nb_chipset == INTEL_NB_7300) {
		/* the 7300 has four FSBs */
		MCERR_FSB_WR(2, l_mcerr_fsb);
		MCERR_FSB_WR(3, l_mcerr_fsb);
	}
	if (nb_chipset == INTEL_NB_5400) {
		/* thermal registers exist only on the 5400 */
		MCERR_THR_WR(l_mcerr_thr);
	}
}
1167 
/*
 * One-time device initialization: create the error queue, discover the
 * DIMM topology, and program every error-register group.  Returns 0 on
 * success or EAGAIN if the error queue cannot be created.
 */
int
nb_dev_init()
{
	find_dimm_label_t *label_function_p;

	label_function_p = find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		/* undo the mutex; nothing else was set up yet */
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	nb_int_init();
	nb_thr_init();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}
1192 
1193 int
1194 nb_init()
1195 {
1196 	/* return ENOTSUP if there is no PCI config space support. */
1197 	if (pci_getl_func == NULL)
1198 		return (ENOTSUP);
1199 
1200 	/* get vendor and device */
1201 	nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
1202 	switch (nb_chipset) {
1203 	default:
1204 		if (nb_5000_memory_controller == 0)
1205 			return (ENOTSUP);
1206 		break;
1207 	case INTEL_NB_7300:
1208 	case INTEL_NB_5000P:
1209 	case INTEL_NB_5000X:
1210 		break;
1211 	case INTEL_NB_5000V:
1212 	case INTEL_NB_5000Z:
1213 		nb_number_memory_controllers = 1;
1214 		break;
1215 	case INTEL_NB_5400:
1216 	case INTEL_NB_5400A:
1217 	case INTEL_NB_5400B:
1218 		nb_chipset = INTEL_NB_5400;
1219 		break;
1220 	}
1221 	return (0);
1222 }
1223 
1224 void
1225 nb_dev_reinit()
1226 {
1227 	int i, j;
1228 	int nchannels = nb_number_memory_controllers * 2;
1229 	nb_dimm_t **dimmpp;
1230 	nb_dimm_t *dimmp;
1231 	nb_dimm_t **old_nb_dimms;
1232 	int old_nb_dimms_per_channel;
1233 	find_dimm_label_t *label_function_p;
1234 	int dimm_slot = nb_dimm_slots;
1235 
1236 	old_nb_dimms = nb_dimms;
1237 	old_nb_dimms_per_channel = nb_dimms_per_channel;
1238 
1239 	dimm_fini();
1240 	nb_dimms_per_channel = 0;
1241 	label_function_p = find_dimms_per_channel();
1242 	dimm_init();
1243 	nb_dimms_init(label_function_p);
1244 	nb_mc_init();
1245 	nb_pex_init();
1246 	nb_int_init();
1247 	nb_thr_init();
1248 	nb_fbd_init();
1249 	nb_fsb_init();
1250 	nb_scrubber_enable();
1251 
1252 	dimmpp = old_nb_dimms;
1253 	for (i = 0; i < nchannels; i++) {
1254 		for (j = 0; j < old_nb_dimms_per_channel; j++) {
1255 			dimmp = *dimmpp;
1256 			if (dimmp) {
1257 				kmem_free(dimmp, sizeof (nb_dimm_t));
1258 				*dimmpp = NULL;
1259 			}
1260 			dimmp++;
1261 		}
1262 	}
1263 	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) * dimm_slot);
1264 }
1265 
/*
 * Undo nb_dev_init(): destroy the error queue and mutex, restore every
 * error-register group to its BIOS state, and free the DIMM topology.
 */
void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_thr_fini();
	nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}
1279 
/* Module unload hook; nothing beyond nb_dev_unload() is required. */
void
nb_unload()
{
}
1284