xref: /illumos-gate/usr/src/uts/intel/io/intel_nb5000/nb5000_init.c (revision 628e3cbed6489fa1db545d8524a06cd6535af456)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include <sys/pci.h>
44 #include <sys/pcie.h>
45 #include "nb5000.h"
46 #include "nb_log.h"
47 #include "dimm_phys.h"
48 #include "rank.h"
49 
/* Tunable: non-zero enables the chipset's hardware memory scrubbers. */
int nb_hw_memory_scrub_enable = 1;
/* Count of calls that disabled the software memory scrubber. */
static int nb_sw_scrub_disabled = 0;

/* Tunable: non-zero allows attach even on an unrecognized chipset id. */
int nb_5000_memory_controller = 0;
int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
int nb_dimms_per_channel = 0;
/* DIMM count accumulated from SMBIOS memory-array records. */
static int ndimms = 0;

nb_dimm_t **nb_dimms;		/* flat per-channel array of DIMM records */
int nb_ndimm;			/* number of DIMMs actually found */
uint32_t nb_chipset;		/* PCI vendor/device id read at nb_init() */
enum nb_memory_mode nb_mode;	/* single-channel / mirror / normal / spare */
bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
uint32_t top_of_low_memory;	/* base of the memory hole below 4GB */
uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];

errorq_t *nb_queue;		/* error queue drained by nb_drain() */
kmutex_t nb_mutex;

/*
 * Saved copies of the BIOS-programmed error-disposition registers,
 * captured by the *_init() routines and written back by *_fini().
 */
static uint8_t nb_err0_int;
static uint8_t nb_err1_int;
static uint8_t nb_err2_int;
static uint8_t nb_mcerr_int;
static uint32_t nb_emask_int;

static uint32_t nb_err0_fbd;
static uint32_t nb_err1_fbd;
static uint32_t nb_err2_fbd;
static uint32_t nb_mcerr_fbd;
static uint32_t nb_emask_fbd;

static uint16_t nb_err0_fsb;
static uint16_t nb_err1_fsb;
static uint16_t nb_err2_fsb;
static uint16_t nb_mcerr_fsb;
static uint16_t nb_emask_fsb;

static uint16_t nb_err0_thr;
static uint16_t nb_err1_thr;
static uint16_t nb_err2_thr;
static uint16_t nb_mcerr_thr;
static uint16_t nb_emask_thr;

/* Saved PCI-Express (ESI port) error masks, restored by nb_pex_fini(). */
static uint32_t	emask_uncor_pex[NB_PCI_DEV];
static uint32_t emask_cor_pex[NB_PCI_DEV];
static uint32_t emask_rp_pex[NB_PCI_DEV];
static uint32_t docmd_pex[NB_PCI_DEV];
static uint32_t uncerrsev[NB_PCI_DEV];

/* Last MCERR values programmed; reinstated by nb_mask_mc_reset(). */
static uint8_t l_mcerr_int;
static uint32_t l_mcerr_fbd;
static uint16_t l_mcerr_fsb;
static uint16_t l_mcerr_thr;

/* Tunables controlling the FB-DIMM error mask programming. */
uint_t nb5000_emask_fbd = EMASK_5000_FBD_RES;
uint_t nb5400_emask_fbd = 0;
int nb5000_reset_emask_fbd = 1;
uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;
uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;

/* Tunables controlling the front-side-bus error mask programming. */
uint_t nb5000_emask_fsb = 0;
int nb5000_reset_emask_fsb = 1;
uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;

uint_t nb5400_emask_int = 0;

/* Tunables controlling the internal-error mask programming. */
uint_t nb7300_emask_int = EMASK_INT_7300;
uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
uint_t nb5000_emask_int = EMASK_INT_5000;
int nb5000_reset_emask_int = 1;
uint_t nb5000_mask_poll_int = EMASK_INT_NF;
uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;

/* Tunables controlling the 5400 thermal-error mask programming. */
uint_t nb_mask_poll_thr = EMASK_THR_NF;
uint_t nb_mask_bios_thr = EMASK_THR_FATAL;

/* Tunables controlling PCI-Express error mask/DOCMD programming. */
int nb5000_reset_uncor_pex = 0;
uint_t nb5000_mask_uncor_pex = 0;
int nb5000_reset_cor_pex = 0;
uint_t nb5000_mask_cor_pex = 0xffffffff;
int nb_set_docmd = 1;
uint32_t nb5000_rp_pex = 0x1;
uint32_t nb5000_docmd_pex_mask = DOCMD_PEX_MASK;
uint32_t nb5400_docmd_pex_mask = DOCMD_5400_PEX_MASK;
uint32_t nb5000_docmd_pex = DOCMD_PEX;
uint32_t nb5400_docmd_pex = DOCMD_5400_PEX;

/* Set when the MC driver has altered an MCERR mask (see nb_*_mask_mc). */
int nb_mask_mc_set;
142 
/*
 * Per-platform hook used to translate a flat DIMM index into the
 * silkscreen label printed on the board.
 */
typedef struct find_dimm_label {
	void (*label_function)(int, char *, int);
} find_dimm_label_t;

static void x8450_dimm_label(int, char *, int);

/*
 * Table mapping an SMBIOS system identity (vendor/product prefix) to
 * that platform's DIMM labelling routine and DIMMs-per-channel count.
 */
static struct platform_label {
	const char *sys_vendor;		/* SMB_TYPE_SYSTEM vendor prefix */
	const char *sys_product;	/* SMB_TYPE_SYSTEM product prefix */
	find_dimm_label_t dimm_label;
	int dimms_per_channel;
} platform_label[] = {
	{ "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
	    x8450_dimm_label, 8 },
	{ NULL, NULL, NULL, 0 }		/* terminator */
};
159 
/*
 * Read the SPD status register for a logical bus.  The bus number
 * packs the branch in its upper bits and the channel in bit 0.
 */
static unsigned short
read_spd(int bus)
{
	return (SPD_RD(bus >> 1, bus & 1));
}
171 
172 static void
173 write_spdcmd(int bus, uint32_t val)
174 {
175 	int branch = bus >> 1;
176 	int channel = bus & 1;
177 	SPDCMD_WR(branch, channel, val);
178 }
179 
/*
 * Read one byte from a DIMM's SPD EEPROM through the memory
 * controller's SPD interface.  Polls the SPD status register before
 * and after issuing the command, and retries the whole transaction up
 * to 4 times on bus error.  Returns the byte read, or -1 on timeout
 * or persistent bus error.
 */
static int
read_spd_eeprom(int bus, int slave, int addr)
{
	int retry = 4;
	int wait;
	int spd;
	uint32_t cmd;

	for (;;) {
		/* wait for the SPD interface to go idle */
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0)
				return (-1);
			drv_usecwait(10);
		}
		/* issue the EEPROM access for (slave, addr) */
		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
		write_spdcmd(bus, cmd);
		/* wait for the command to complete */
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0) {
				/* treat timeout like a bus error: retry */
				spd = SPD_BUS_ERROR;
				break;
			}
			drv_usecwait(10);
		}
		/*
		 * Spin (no delay) until the read data is valid or a bus
		 * error is reported, reusing the remaining wait budget.
		 */
		while ((spd & SPD_BUS_ERROR) == 0 &&
		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
		    SPD_READ_DATA_VALID) {
			spd = read_spd(bus);
			if (--wait == 0)
				return (-1);
		}
		if ((spd & SPD_BUS_ERROR) == 0)
			break;
		if (--retry == 0)
			return (-1);
	}
	/* low byte of the status register holds the data */
	return (spd & 0xff);
}
225 
226 static void
227 nb_fini()
228 {
229 	int i, j;
230 	int nchannels = nb_number_memory_controllers * 2;
231 	nb_dimm_t **dimmpp;
232 	nb_dimm_t *dimmp;
233 
234 	dimmpp = nb_dimms;
235 	for (i = 0; i < nchannels; i++) {
236 		for (j = 0; j < nb_dimms_per_channel; j++) {
237 			dimmp = *dimmpp;
238 			if (dimmp) {
239 				kmem_free(dimmp, sizeof (nb_dimm_t));
240 				*dimmpp = NULL;
241 			}
242 			dimmp++;
243 		}
244 	}
245 	kmem_free(nb_dimms, sizeof (nb_dimm_t *) *
246 	    nb_number_memory_controllers * 2 * nb_dimms_per_channel);
247 	nb_dimms = NULL;
248 	dimm_fini();
249 }
250 
251 void
252 nb_scrubber_enable()
253 {
254 	uint32_t mc;
255 
256 	if (!nb_hw_memory_scrub_enable)
257 		return;
258 
259 	mc = MC_RD();
260 	if ((mc & MC_MIRROR) != 0) /* mirror mode */
261 		mc |= MC_PATROL_SCRUB;
262 	else
263 		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
264 	MC_WR(mc);
265 
266 	if (nb_sw_scrub_disabled++)
267 		cmi_mc_sw_memscrub_disable();
268 }
269 
/*
 * Build an nb_dimm_t for the DIMM at (channel, dimm) described by the
 * given MTR register value.  Identity data (manufacturer, serial
 * number, part number) is read from the DIMM's SPD EEPROM; geometry
 * is decoded from the MTR register.  Returns NULL if the MTR says no
 * DIMM is present or the SPD memory type is not 9 (presumably
 * FB-DIMM -- confirm against JEDEC SPD spec).
 */
static nb_dimm_t *
nb_dimm_init(int channel, int dimm, uint16_t mtr)
{
	nb_dimm_t *dp;
	int i, t;
	int spd_sz;

	if (MTR_PRESENT(mtr) == 0)
		return (NULL);
	t = read_spd_eeprom(channel, dimm, 2) & 0xf;

	if (t != 9)
		return (NULL);

	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);

	/* SPD byte 0 encodes how much of the EEPROM is written */
	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
	if (t == 1)
		spd_sz = 128;
	else if (t == 2)
		spd_sz = 176;
	else
		spd_sz = 256;
	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
	    (read_spd_eeprom(channel, dimm, 118) << 8);
	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
	dp->serial_number =
	    (read_spd_eeprom(channel, dimm, 122) << 24) |
	    (read_spd_eeprom(channel, dimm, 123) << 16) |
	    (read_spd_eeprom(channel, dimm, 124) << 8) |
	    read_spd_eeprom(channel, dimm, 125);
	/* manufacturing week is BCD-encoded */
	t = read_spd_eeprom(channel, dimm, 121);
	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
	/* part number and revision only exist in the larger EEPROMs */
	if (spd_sz > 128) {
		for (i = 0; i < sizeof (dp->part_number); i++) {
			dp->part_number[i] =
			    read_spd_eeprom(channel, dimm, 128 + i);
		}
		for (i = 0; i < sizeof (dp->revision); i++) {
			dp->revision[i] =
			    read_spd_eeprom(channel, dimm, 146 + i);
		}
	}
	/* geometry decoded from the MTR register */
	dp->mtr_present = MTR_PRESENT(mtr);
	dp->nranks = MTR_NUMRANK(mtr);
	dp->nbanks = MTR_NUMBANK(mtr);
	dp->ncolumn = MTR_NUMCOL(mtr);
	dp->nrow = MTR_NUMROW(mtr);
	dp->width = MTR_WIDTH(mtr);
	dp->dimm_size = MTR_DIMMSIZE(mtr);

	return (dp);
}
324 
325 static uint64_t
326 mc_range(int controller, uint64_t base)
327 {
328 	int i;
329 	uint64_t limit = 0;
330 
331 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
332 		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
333 		    base < nb_banks[i].limit) {
334 			limit = nb_banks[i].limit;
335 			if (base <= top_of_low_memory &&
336 			    limit > top_of_low_memory) {
337 				limit -= TLOW_MAX - top_of_low_memory;
338 			}
339 			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
340 			    nb_mode != NB_MEMORY_MIRROR) {
341 				limit = limit / 2;
342 			}
343 		}
344 	}
345 	return (limit);
346 }
347 
/*
 * Decode the chipset's memory-map registers (TOLM, MIR, DMIR) into
 * the nb_banks branch-select and nb_ranks rank-select tables, and
 * register every rank with the dimm address-translation code via
 * dimm_add_rank().
 */
void
nb_mc_init()
{
	uint16_t tolm;
	uint16_t mir;
	uint32_t hole_base;
	uint32_t hole_size;
	uint32_t dmir;
	uint64_t base;
	uint64_t limit;
	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
	int i, j, k;
	uint8_t interleave;

	base = 0;
	tolm = TOLM_RD();
	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
	/* decode the branch-select (MIR) registers */
	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
		mir = MIR_RD(i);
		limit = (uint64_t)(mir >> 4) << 28;
		way0 = mir & 1;
		way1 = (mir >> 1) & 1;
		/* no ways set is treated as both ways */
		if (way0 == 0 && way1 == 0) {
			way0 = 1;
			way1 = 1;
		}
		/* account for the memory hole below 4GB */
		if (limit > top_of_low_memory)
			limit += TLOW_MAX - top_of_low_memory;
		nb_banks[i].base = base;
		nb_banks[i].limit = limit;
		nb_banks[i].way[0] = way0;
		nb_banks[i].way[1] = way1;
		base = limit;
	}
	/* decode the rank-select (DMIR) registers for each controller */
	for (i = 0; i < nb_number_memory_controllers; i++) {
		base = 0;

		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
			dmir = DMIR_RD(i, j);
			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
			if (limit == 0) {
				/* open-ended entry: use the branch limit */
				limit = mc_range(i, base);
			}
			branch_interleave = 0;
			hole_base = 0;
			hole_size = 0;
			DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
			/* repeated rank numbers mean fewer interleave ways */
			if (rank0 == rank1)
				interleave = 1;
			else if (rank0 == rank2)
				interleave = 2;
			else
				interleave = 4;
			if (nb_mode != NB_MEMORY_MIRROR &&
			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
					if (base >= nb_banks[k].base &&
					    base < nb_banks[k].limit) {
						/*
						 * NOTE(review): way[] is
						 * indexed with the controller
						 * number 'i' rather than the
						 * matched branch select 'k';
						 * confirm this is intended.
						 */
						if (nb_banks[i].way[0] &&
						    nb_banks[i].way[1]) {
							interleave *= 2;
							limit *= 2;
							branch_interleave = 1;
						}
						break;
					}
				}
			}
			/* adjust for the memory hole below 4GB */
			if (base < top_of_low_memory &&
			    limit > top_of_low_memory) {
				hole_base = top_of_low_memory;
				hole_size = TLOW_MAX - top_of_low_memory;
				limit += hole_size;
			} else if (base > top_of_low_memory) {
				limit += TLOW_MAX - top_of_low_memory;
			}
			nb_ranks[i][j].base = base;
			nb_ranks[i][j].limit = limit;
			nb_ranks[i][j].rank[0] = rank0;
			nb_ranks[i][j].rank[1] = rank1;
			nb_ranks[i][j].rank[2] = rank2;
			nb_ranks[i][j].rank[3] = rank3;
			nb_ranks[i][j].interleave = interleave;
			nb_ranks[i][j].branch_interleave = branch_interleave;
			nb_ranks[i][j].hole_base = hole_base;
			nb_ranks[i][j].hole_size = hole_size;
			/* register only distinct ranks of non-empty entries */
			if (limit > base) {
				dimm_add_rank(i, rank0, branch_interleave, 0,
				    base, hole_base, hole_size, interleave,
				    limit);
				if (rank0 != rank1) {
					dimm_add_rank(i, rank1,
					    branch_interleave, 1, base,
					    hole_base, hole_size, interleave,
					    limit);
					if (rank0 != rank2) {
						dimm_add_rank(i, rank2,
						    branch_interleave, 2, base,
						    hole_base, hole_size,
						    interleave, limit);
						dimm_add_rank(i, rank3,
						    branch_interleave, 3, base,
						    hole_base, hole_size,
						    interleave, limit);
					}
				}
			}
			base = limit;
		}
	}
}
459 
460 void
461 nb_used_spare_rank(int branch, int bad_rank)
462 {
463 	int i;
464 	int j;
465 
466 	for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
467 		for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
468 			if (nb_ranks[branch][i].rank[j] == bad_rank) {
469 				nb_ranks[branch][i].rank[j] =
470 				    spare_rank[branch];
471 				i = NB_MEM_RANK_SELECT;
472 				break;
473 			}
474 		}
475 	}
476 }
477 
/*
 * smbios_iter() callback: accumulate into the file-scope 'ndimms' the
 * number of memory devices reported by each SMBIOS memory-array
 * record.  Always returns 0 so the iteration continues.
 */
/*ARGSUSED*/
static int
memoryarray(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	smbios_memarray_t ma;

	if (sp->smbstr_type == SMB_TYPE_MEMARRAY &&
	    smbios_info_memarray(shp, sp->smbstr_id, &ma) == 0) {
		ndimms += ma.smbma_ndevs;
	}
	return (0);
}
490 
/*
 * Determine nb_dimms_per_channel and, for recognized platforms, the
 * DIMM labelling routine.  First the SMBIOS system vendor/product is
 * matched (by prefix) against the platform_label table; failing that,
 * the SMBIOS memory-device count is divided by the number of channels;
 * failing that, NB_MAX_DIMMS_PER_CHANNEL is assumed.  Returns a
 * pointer into platform_label for a recognized platform, else NULL.
 */
find_dimm_label_t *
find_dimms_per_channel()
{
	struct platform_label *pl;
	smbios_info_t si;
	smbios_system_t sy;
	id_t id;
	int read_memarray = 1;
	find_dimm_label_t *rt = NULL;

	if (ksmbios != NULL) {
		if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
		    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
			for (pl = platform_label; pl->sys_vendor; pl++) {
				if (strncmp(pl->sys_vendor,
				    si.smbi_manufacturer,
				    strlen(pl->sys_vendor)) == 0 &&
				    strncmp(pl->sys_product, si.smbi_product,
				    strlen(pl->sys_product)) == 0) {
					nb_dimms_per_channel =
					    pl->dimms_per_channel;
					/* table entry is authoritative */
					read_memarray = 0;
					rt = &pl->dimm_label;
					break;
				}
			}
		}
		if (read_memarray)
			(void) smbios_iter(ksmbios, memoryarray, 0);
	}
	if (nb_dimms_per_channel == 0) {
		if (ndimms) {
			/* assume DIMMs are spread evenly over channels */
			nb_dimms_per_channel = ndimms /
			    (nb_number_memory_controllers * 2);
		} else {
			nb_dimms_per_channel = NB_MAX_DIMMS_PER_CHANNEL;
		}
	}
	return (rt);
}
531 
/*
 * smbios_iter() callback: copy each SMBIOS memory-device location
 * string into the matching DIMM record's label.  'arg' is a pointer
 * to a cursor into the nb_dimms array; the cursor advances on every
 * memory-device record, present or not, so SMBIOS order stays aligned
 * with array order.  Always returns 0 to continue iteration.
 */
static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	nb_dimm_t ***dimmpp = arg;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		dimmp = **dimmpp;
		if (dimmp && smbios_info_memdevice(shp, sp->smbstr_id,
		    &md) == 0 && md.smbmd_dloc != NULL) {
			(void) snprintf(dimmp->label,
			    sizeof (dimmp->label), "%s", md.smbmd_dloc);
		}
		(*dimmpp)++;
	}
	return (0);
}
550 
551 void
552 nb_smbios()
553 {
554 	nb_dimm_t **dimmpp;
555 
556 	if (ksmbios != NULL) {
557 		dimmpp = nb_dimms;
558 		(void) smbios_iter(ksmbios, dimm_label, &dimmpp);
559 	}
560 }
561 
/*
 * Generate the Sun Blade X8450 silkscreen label for a DIMM.  The flat
 * index packs the channel in bits 3+ and the per-channel slot in the
 * low three bits; the board numbers slots channel-major in strides of
 * four.
 */
static void
x8450_dimm_label(int dimm, char *label, int label_sz)
{
	int channel = dimm >> 3;
	int slot = dimm & 0x7;

	(void) snprintf(label, label_sz, "D%d", (slot * 4) + channel);
}
570 
/*
 * Discover every DIMM behind every memory controller.  Determines the
 * memory mode (single-channel / mirror / normal / spare-rank) from the
 * MCA and MC registers, allocates the nb_dimms pointer array, builds
 * an nb_dimm_t per populated slot, and labels each DIMM either with
 * the platform's label_function or from SMBIOS.
 */
static void
nb_dimms_init(find_dimm_label_t *label_function)
{
	int i, j, k, l;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	/* one slot per DIMM across 2 channels per controller */
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_number_memory_controllers * 2 * nb_dimms_per_channel, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		/* check whether a spare rank has been deployed */
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			/* the MTR value covers both channels of the branch */
			mtr = MTR_RD(i, j);
			k = i * 2;	/* first channel of this branch */
			dimmpp[j] = nb_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm ++;
				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
				    dimmpp[j]->width, dimmpp[j]->ncolumn,
				    dimmpp[j]->nrow);
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
			}
			/* second channel of the branch */
			dimmpp[j + nb_dimms_per_channel] =
			    nb_dimm_init(k + 1, j, mtr);
			l = j + nb_dimms_per_channel;
			if (dimmpp[l]) {
				if (label_function) {
					/* (k*dpc)+l == ((k+1)*dpc)+j */
					label_function->label_function(
					    (k * nb_dimms_per_channel) + l,
					    dimmpp[l]->label,
					    sizeof (dimmpp[l]->label));
				}
				nb_ndimm ++;
			}
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
	/* no platform-specific labels: fall back to SMBIOS strings */
	if (label_function == NULL)
		nb_smbios();
}
635 
636 
/*
 * Setup the ESI port registers to enable SERR for southbridge.
 * Saves the BIOS-programmed PCI-Express error masks (restored by
 * nb_pex_fini()), optionally reprograms the uncorrectable/correctable
 * masks and the DOCMD register, and enables SERR signalling.
 */
static void
nb_pex_init()
{
	int i = 0; /* ESI port */
	uint32_t mask;
	uint16_t regw;

	/* save BIOS settings for restoration at unload */
	emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
	emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
	emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
	docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
	uncerrsev[i] = UNCERRSEV_RD(i);

	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
	if (nb_set_docmd) {
		if (nb_chipset == INTEL_NB_5400) {
			/* disable masking of ERR pins used by DOCMD */
			PEX_ERR_PIN_MASK_WR(i, 0x10);

			mask = (docmd_pex[i] & nb5400_docmd_pex_mask) |
			    (nb5400_docmd_pex & ~nb5400_docmd_pex_mask);
		} else {
			mask = (docmd_pex[i] & nb5000_docmd_pex_mask) |
			    (nb5000_docmd_pex & ~nb5000_docmd_pex_mask);
		}
		PEX_ERR_DOCMD_WR(i, mask);
	}

	/* RP error message (CE/NFE/FE) detect mask */
	EMASK_RP_PEX_WR(i, nb5000_rp_pex);

	/* Command Register - Enable SERR */
	regw = nb_pci_getw(0, i, 0, PCI_CONF_COMM, 0);
	nb_pci_putw(0, i, 0, PCI_CONF_COMM,
	    regw | PCI_COMM_SERR_ENABLE);

	/* Root Control Register - SERR on NFE/FE */
	PEXROOTCTL_WR(i, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN);

	/* AER UE Mask - Mask UR */
	UNCERRMSK_WR(i, PCIE_AER_UCE_UR);
}
684 
/*
 * Restore the ESI port's PCI-Express error masks saved at init, then
 * reapply the driver's uncorrectable/correctable masks if the
 * corresponding reset tunables are still set.
 */
static void
nb_pex_fini()
{
	int i = 0; /* ESI port */

	EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
	EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
	EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
	PEX_ERR_DOCMD_WR(i, docmd_pex[i]);

	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
}
700 
/*
 * Initialize internal-error reporting.  The BIOS register values are
 * saved for nb_int_fini(), everything is masked while the new
 * dispositions are computed, then ERR0/1/2 and MCERR are programmed so
 * that polled (non-fatal) errors are masked from signalling and fatal
 * errors remain routed to MCERR unless the BIOS already routed them to
 * an ERR register.
 */
void
nb_int_init()
{
	uint8_t err0_int;
	uint8_t err1_int;
	uint8_t err2_int;
	uint8_t mcerr_int;
	uint32_t emask_int;
	uint16_t stepping;

	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	/* save BIOS settings for restoration at unload */
	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	/* mask everything while reprogramming */
	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	/*
	 * BIOS-owned (fatal) errors stay routed to MCERR unless the
	 * BIOS already directed them to ERR0/1/2; polled errors are
	 * masked from all four signalling registers.
	 */
	mcerr_int &= ~nb5000_mask_bios_int;
	mcerr_int |= nb5000_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	mcerr_int |= nb5000_mask_poll_int;
	err0_int |= nb5000_mask_poll_int;
	err1_int |= nb5000_mask_poll_int;
	err2_int |= nb5000_mask_poll_int;

	l_mcerr_int = mcerr_int;
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		if (nb_chipset == INTEL_NB_7300) {
			/* 7300 stepping 0 needs a different mask */
			stepping = NB5000_STEPPING();
			if (stepping == 0)
				EMASK_5000_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_5000_INT_WR(nb7300_emask_int);
		} else if (nb_chipset == INTEL_NB_5400) {
			/* preserve the 5400's reserved emask bits */
			EMASK_5400_INT_WR(nb5400_emask_int |
			    (emask_int & EMASK_INT_RES));
		} else {
			EMASK_5000_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}
758 
/*
 * Restore the BIOS internal-error register values saved by
 * nb_int_init(), masking everything first.
 */
void
nb_int_fini()
{
	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}
774 
775 void
776 nb_int_mask_mc(uint32_t mc_mask_int)
777 {
778 	uint32_t emask_int;
779 
780 	emask_int = MCERR_INT_RD();
781 	if ((emask_int & mc_mask_int) != mc_mask_int) {
782 		MCERR_INT_WR(emask_int|mc_mask_int);
783 		nb_mask_mc_set = 1;
784 	}
785 }
786 
/*
 * Initialize FB-DIMM error reporting.  BIOS values are saved for
 * nb_fbd_fini(), everything is masked during reprogramming, then the
 * chipset-appropriate poll/bios mask sets are applied: polled errors
 * are masked from signalling, fatal errors stay routed to MCERR unless
 * the BIOS already routed them to ERR0/1/2.
 */
void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;
	uint32_t emask_bios_fbd;
	uint32_t emask_poll_fbd;

	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	/* save BIOS settings for restoration at unload */
	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	/* mask everything while reprogramming */
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	/* select the chipset-specific poll/bios mask sets */
	if (nb_chipset == INTEL_NB_7300 && nb_mode == NB_MEMORY_MIRROR) {
		/* MCH 7300 errata 34 */
		emask_bios_fbd = nb5000_mask_bios_fbd & ~EMASK_FBD_M23;
		emask_poll_fbd = nb5000_mask_poll_fbd;
		mcerr_fbd |= EMASK_FBD_M23;
	} else if (nb_chipset == INTEL_NB_5400) {
		emask_bios_fbd = nb5400_mask_bios_fbd;
		emask_poll_fbd = nb5400_mask_poll_fbd;
	} else {
		emask_bios_fbd = nb5000_mask_bios_fbd;
		emask_poll_fbd = nb5000_mask_poll_fbd;
	}
	/*
	 * BIOS-owned errors stay routed to MCERR unless the BIOS already
	 * directed them to ERR0/1/2; polled errors are masked everywhere.
	 */
	mcerr_fbd &= ~emask_bios_fbd;
	mcerr_fbd |= emask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= emask_poll_fbd;
	err0_fbd |= emask_poll_fbd;
	err1_fbd |= emask_poll_fbd;
	err2_fbd |= emask_poll_fbd;

	l_mcerr_fbd = mcerr_fbd;
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd) {
		if (nb_chipset == INTEL_NB_5400)
			EMASK_FBD_WR(nb5400_emask_fbd);
		else
			EMASK_FBD_WR(nb5000_emask_fbd);
	} else {
		EMASK_FBD_WR(nb_emask_fbd);
	}
}
849 
850 void
851 nb_fbd_mask_mc(uint32_t mc_mask_fbd)
852 {
853 	uint32_t emask_fbd;
854 
855 	emask_fbd = MCERR_FBD_RD();
856 	if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
857 		MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
858 		nb_mask_mc_set = 1;
859 	}
860 }
861 
/*
 * Restore the BIOS FB-DIMM error register values saved by
 * nb_fbd_init(), masking everything first.
 */
void
nb_fbd_fini()
{
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}
877 
878 static void
879 nb_fsb_init()
880 {
881 	uint16_t err0_fsb;
882 	uint16_t err1_fsb;
883 	uint16_t err2_fsb;
884 	uint16_t mcerr_fsb;
885 	uint16_t emask_fsb;
886 
887 	err0_fsb = ERR0_FSB_RD(0);
888 	err1_fsb = ERR1_FSB_RD(0);
889 	err2_fsb = ERR2_FSB_RD(0);
890 	mcerr_fsb = MCERR_FSB_RD(0);
891 	emask_fsb = EMASK_FSB_RD(0);
892 
893 	ERR0_FSB_WR(0, 0xffff);
894 	ERR1_FSB_WR(0, 0xffff);
895 	ERR2_FSB_WR(0, 0xffff);
896 	MCERR_FSB_WR(0, 0xffff);
897 	EMASK_FSB_WR(0, 0xffff);
898 
899 	ERR0_FSB_WR(1, 0xffff);
900 	ERR1_FSB_WR(1, 0xffff);
901 	ERR2_FSB_WR(1, 0xffff);
902 	MCERR_FSB_WR(1, 0xffff);
903 	EMASK_FSB_WR(1, 0xffff);
904 
905 	nb_err0_fsb = err0_fsb;
906 	nb_err1_fsb = err1_fsb;
907 	nb_err2_fsb = err2_fsb;
908 	nb_mcerr_fsb = mcerr_fsb;
909 	nb_emask_fsb = emask_fsb;
910 
911 	mcerr_fsb &= ~nb5000_mask_bios_fsb;
912 	mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
913 	mcerr_fsb |= nb5000_mask_poll_fsb;
914 	err0_fsb |= nb5000_mask_poll_fsb;
915 	err1_fsb |= nb5000_mask_poll_fsb;
916 	err2_fsb |= nb5000_mask_poll_fsb;
917 
918 	l_mcerr_fsb = mcerr_fsb;
919 	ERR0_FSB_WR(0, err0_fsb);
920 	ERR1_FSB_WR(0, err1_fsb);
921 	ERR2_FSB_WR(0, err2_fsb);
922 	MCERR_FSB_WR(0, mcerr_fsb);
923 	if (nb5000_reset_emask_fsb) {
924 		EMASK_FSB_WR(0, nb5000_emask_fsb);
925 	} else {
926 		EMASK_FSB_WR(0, nb_emask_fsb);
927 	}
928 
929 	ERR0_FSB_WR(1, err0_fsb);
930 	ERR1_FSB_WR(1, err1_fsb);
931 	ERR2_FSB_WR(1, err2_fsb);
932 	MCERR_FSB_WR(1, mcerr_fsb);
933 	if (nb5000_reset_emask_fsb) {
934 		EMASK_FSB_WR(1, nb5000_emask_fsb);
935 	} else {
936 		EMASK_FSB_WR(1, nb_emask_fsb);
937 	}
938 
939 	if (nb_chipset == INTEL_NB_7300) {
940 		ERR0_FSB_WR(2, 0xffff);
941 		ERR1_FSB_WR(2, 0xffff);
942 		ERR2_FSB_WR(2, 0xffff);
943 		MCERR_FSB_WR(2, 0xffff);
944 		EMASK_FSB_WR(2, 0xffff);
945 
946 		ERR0_FSB_WR(3, 0xffff);
947 		ERR1_FSB_WR(3, 0xffff);
948 		ERR2_FSB_WR(3, 0xffff);
949 		MCERR_FSB_WR(3, 0xffff);
950 		EMASK_FSB_WR(3, 0xffff);
951 
952 		ERR0_FSB_WR(2, err0_fsb);
953 		ERR1_FSB_WR(2, err1_fsb);
954 		ERR2_FSB_WR(2, err2_fsb);
955 		MCERR_FSB_WR(2, mcerr_fsb);
956 		if (nb5000_reset_emask_fsb) {
957 			EMASK_FSB_WR(2, nb5000_emask_fsb);
958 		} else {
959 			EMASK_FSB_WR(2, nb_emask_fsb);
960 		}
961 
962 		ERR0_FSB_WR(3, err0_fsb);
963 		ERR1_FSB_WR(3, err1_fsb);
964 		ERR2_FSB_WR(3, err2_fsb);
965 		MCERR_FSB_WR(3, mcerr_fsb);
966 		if (nb5000_reset_emask_fsb) {
967 			EMASK_FSB_WR(3, nb5000_emask_fsb);
968 		} else {
969 			EMASK_FSB_WR(3, nb_emask_fsb);
970 		}
971 	}
972 }
973 
974 static void
975 nb_fsb_fini() {
976 	ERR0_FSB_WR(0, 0xffff);
977 	ERR1_FSB_WR(0, 0xffff);
978 	ERR2_FSB_WR(0, 0xffff);
979 	MCERR_FSB_WR(0, 0xffff);
980 	EMASK_FSB_WR(0, 0xffff);
981 
982 	ERR0_FSB_WR(0, nb_err0_fsb);
983 	ERR1_FSB_WR(0, nb_err1_fsb);
984 	ERR2_FSB_WR(0, nb_err2_fsb);
985 	MCERR_FSB_WR(0, nb_mcerr_fsb);
986 	EMASK_FSB_WR(0, nb_emask_fsb);
987 
988 	ERR0_FSB_WR(1, 0xffff);
989 	ERR1_FSB_WR(1, 0xffff);
990 	ERR2_FSB_WR(1, 0xffff);
991 	MCERR_FSB_WR(1, 0xffff);
992 	EMASK_FSB_WR(1, 0xffff);
993 
994 	ERR0_FSB_WR(1, nb_err0_fsb);
995 	ERR1_FSB_WR(1, nb_err1_fsb);
996 	ERR2_FSB_WR(1, nb_err2_fsb);
997 	MCERR_FSB_WR(1, nb_mcerr_fsb);
998 	EMASK_FSB_WR(1, nb_emask_fsb);
999 
1000 	if (nb_chipset == INTEL_NB_7300) {
1001 		ERR0_FSB_WR(2, 0xffff);
1002 		ERR1_FSB_WR(2, 0xffff);
1003 		ERR2_FSB_WR(2, 0xffff);
1004 		MCERR_FSB_WR(2, 0xffff);
1005 		EMASK_FSB_WR(2, 0xffff);
1006 
1007 		ERR0_FSB_WR(2, nb_err0_fsb);
1008 		ERR1_FSB_WR(2, nb_err1_fsb);
1009 		ERR2_FSB_WR(2, nb_err2_fsb);
1010 		MCERR_FSB_WR(2, nb_mcerr_fsb);
1011 		EMASK_FSB_WR(2, nb_emask_fsb);
1012 
1013 		ERR0_FSB_WR(3, 0xffff);
1014 		ERR1_FSB_WR(3, 0xffff);
1015 		ERR2_FSB_WR(3, 0xffff);
1016 		MCERR_FSB_WR(3, 0xffff);
1017 		EMASK_FSB_WR(3, 0xffff);
1018 
1019 		ERR0_FSB_WR(3, nb_err0_fsb);
1020 		ERR1_FSB_WR(3, nb_err1_fsb);
1021 		ERR2_FSB_WR(3, nb_err2_fsb);
1022 		MCERR_FSB_WR(3, nb_mcerr_fsb);
1023 		EMASK_FSB_WR(3, nb_emask_fsb);
1024 	}
1025 }
1026 
/*
 * Ensure the given error bits are set in the MCERR mask of the given
 * front-side bus, and note that the MC driver changed a mask so
 * nb_mask_mc_reset() can reinstate the driver's values later.
 */
void
nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
{
	uint16_t emask_fsb;

	emask_fsb = MCERR_FSB_RD(fsb);
	if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
		/*
		 * NOTE(review): EMASK_FBD_RES is an FB-DIMM constant being
		 * OR'd into an FSB register here; the sibling
		 * nb_*_mask_mc() routines add no reserved bits.  Confirm
		 * this is intentional and not a copy-paste of the FBD path.
		 */
		MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
		nb_mask_mc_set = 1;
	}
}
1038 
/*
 * Initialize thermal-error reporting (5400 chipset only).  The BIOS
 * values are saved for nb_thr_fini(), everything is masked during
 * reprogramming, then the poll/bios mask sets are applied as in the
 * other *_init() routines.  Note the emask is always restored to the
 * saved BIOS value; unlike FBD/FSB/INT there is no reset tunable.
 */
static void
nb_thr_init()
{
	uint16_t err0_thr;
	uint16_t err1_thr;
	uint16_t err2_thr;
	uint16_t mcerr_thr;
	uint16_t emask_thr;

	if (nb_chipset == INTEL_NB_5400) {
		err0_thr = ERR0_THR_RD(0);
		err1_thr = ERR1_THR_RD(0);
		err2_thr = ERR2_THR_RD(0);
		mcerr_thr = MCERR_THR_RD(0);
		emask_thr = EMASK_THR_RD(0);

		/* mask everything while reprogramming */
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* save BIOS settings for restoration at unload */
		nb_err0_thr = err0_thr;
		nb_err1_thr = err1_thr;
		nb_err2_thr = err2_thr;
		nb_mcerr_thr = mcerr_thr;
		nb_emask_thr = emask_thr;

		/*
		 * BIOS-owned errors stay routed to MCERR unless the BIOS
		 * already directed them to ERR0/1/2; polled errors are
		 * masked everywhere.
		 */
		mcerr_thr &= ~nb_mask_bios_thr;
		mcerr_thr |= nb_mask_bios_thr &
		    (~err2_thr | ~err1_thr | ~err0_thr);
		mcerr_thr |= nb_mask_poll_thr;
		err0_thr |= nb_mask_poll_thr;
		err1_thr |= nb_mask_poll_thr;
		err2_thr |= nb_mask_poll_thr;

		l_mcerr_thr = mcerr_thr;
		ERR0_THR_WR(err0_thr);
		ERR1_THR_WR(err1_thr);
		ERR2_THR_WR(err2_thr);
		MCERR_THR_WR(mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1083 
/*
 * Restore the BIOS thermal-error register values saved by
 * nb_thr_init() (5400 chipset only), masking everything first.
 */
static void
nb_thr_fini()
{
	if (nb_chipset == INTEL_NB_5400) {
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		ERR0_THR_WR(nb_err0_thr);
		ERR1_THR_WR(nb_err1_thr);
		ERR2_THR_WR(nb_err2_thr);
		MCERR_THR_WR(nb_mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1101 
1102 void
1103 nb_thr_mask_mc(uint16_t mc_mask_thr)
1104 {
1105 	uint16_t emask_thr;
1106 
1107 	emask_thr = MCERR_THR_RD(0);
1108 	if ((emask_thr & mc_mask_thr) != mc_mask_thr) {
1109 		MCERR_THR_WR(emask_thr|mc_mask_thr);
1110 		nb_mask_mc_set = 1;
1111 	}
1112 }
1113 
/*
 * Reinstate the driver's MCERR masks (the l_mcerr_* values computed by
 * the *_init() routines), undoing any changes made through the
 * nb_*_mask_mc() entry points.
 */
void
nb_mask_mc_reset()
{
	MCERR_FBD_WR(l_mcerr_fbd);
	MCERR_INT_WR(l_mcerr_int);
	MCERR_FSB_WR(0, l_mcerr_fsb);
	MCERR_FSB_WR(1, l_mcerr_fsb);
	if (nb_chipset == INTEL_NB_7300) {
		/* the 7300 has four front-side buses */
		MCERR_FSB_WR(2, l_mcerr_fsb);
		MCERR_FSB_WR(3, l_mcerr_fsb);
	}
	if (nb_chipset == INTEL_NB_5400) {
		/* thermal errors exist on the 5400 only */
		MCERR_THR_WR(l_mcerr_thr);
	}
}
1129 
/*
 * One-time device initialization: create the error queue and mutex,
 * then program every error-reporting unit (internal, thermal, memory
 * map, PEX, FB-DIMM, FSB) and enable hardware scrubbing.  Returns 0 on
 * success or EAGAIN if the error queue cannot be created.
 */
int
nb_dev_init()
{
	find_dimm_label_t *label_function_p;

	label_function_p = find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	nb_int_init();
	nb_thr_init();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}
1154 
/*
 * Identify the northbridge.  Reads the 32-bit vendor/device id from
 * PCI config space into nb_chipset and adjusts per-chipset defaults
 * (single controller on 5000V/Z; 5400A/B are collapsed to 5400).
 * Returns ENOTSUP when PCI config access is unavailable or the
 * chipset is unrecognized (unless overridden by the
 * nb_5000_memory_controller tunable), else 0.
 */
int
nb_init()
{
	/* return ENOTSUP if there is no PCI config space support. */
	if (pci_getl_func == NULL)
		return (ENOTSUP);

	/* get vendor and device */
	nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
	switch (nb_chipset) {
	default:
		if (nb_5000_memory_controller == 0)
			return (ENOTSUP);
		break;
	case INTEL_NB_7300:
	case INTEL_NB_5000P:
	case INTEL_NB_5000X:
		break;
	case INTEL_NB_5000V:
	case INTEL_NB_5000Z:
		nb_number_memory_controllers = 1;
		break;
	case INTEL_NB_5400:
	case INTEL_NB_5400A:
	case INTEL_NB_5400B:
		/* treat all 5400 variants identically */
		nb_chipset = INTEL_NB_5400;
		break;
	}
	return (0);
}
1185 
1186 void
1187 nb_dev_reinit()
1188 {
1189 	int i, j;
1190 	int nchannels = nb_number_memory_controllers * 2;
1191 	nb_dimm_t **dimmpp;
1192 	nb_dimm_t *dimmp;
1193 	nb_dimm_t **old_nb_dimms;
1194 	int old_nb_dimms_per_channel;
1195 	find_dimm_label_t *label_function_p;
1196 
1197 	old_nb_dimms = nb_dimms;
1198 	old_nb_dimms_per_channel = nb_dimms_per_channel;
1199 
1200 	dimm_fini();
1201 	label_function_p = find_dimms_per_channel();
1202 	dimm_init();
1203 	nb_dimms_init(label_function_p);
1204 	nb_mc_init();
1205 	nb_pex_init();
1206 	nb_int_init();
1207 	nb_thr_init();
1208 	nb_fbd_init();
1209 	nb_fsb_init();
1210 	nb_scrubber_enable();
1211 
1212 	dimmpp = old_nb_dimms;
1213 	for (i = 0; i < nchannels; i++) {
1214 		for (j = 0; j < old_nb_dimms_per_channel; j++) {
1215 			dimmp = *dimmpp;
1216 			if (dimmp) {
1217 				kmem_free(dimmp, sizeof (nb_dimm_t));
1218 				*dimmpp = NULL;
1219 			}
1220 			dimmp++;
1221 		}
1222 	}
1223 	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) *
1224 	    nb_number_memory_controllers * 2 * old_nb_dimms_per_channel);
1225 }
1226 
/*
 * Tear down everything nb_dev_init() set up: destroy the error queue
 * and mutex, restore the BIOS error-register state, and free the DIMM
 * records.
 */
void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_thr_fini();
	nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}
1240 
/* No module-global teardown is required beyond nb_dev_unload(). */
void
nb_unload()
{
}
1245