xref: /titanic_44/usr/src/uts/sun4u/opl/io/mc-opl.c (revision 97a81520ff6c5b6ca547c9b2932e02f6b1dbd49e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /*
26  * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2008
27  */
28 
29 
30 #include <sys/types.h>
31 #include <sys/sysmacros.h>
32 #include <sys/conf.h>
33 #include <sys/modctl.h>
34 #include <sys/stat.h>
35 #include <sys/async.h>
36 #include <sys/machcpuvar.h>
37 #include <sys/machsystm.h>
38 #include <sys/promif.h>
39 #include <sys/ksynch.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/sunndi.h>
43 #include <sys/ddifm.h>
44 #include <sys/fm/protocol.h>
45 #include <sys/fm/util.h>
46 #include <sys/kmem.h>
47 #include <sys/fm/io/opl_mc_fm.h>
48 #include <sys/memlist.h>
49 #include <sys/param.h>
50 #include <sys/disp.h>
51 #include <vm/page.h>
52 #include <sys/mc-opl.h>
53 #include <sys/opl.h>
54 #include <sys/opl_dimm.h>
55 #include <sys/scfd/scfostoescf.h>
56 #include <sys/cpu_module.h>
57 #include <vm/seg_kmem.h>
58 #include <sys/vmem.h>
59 #include <vm/hat_sfmmu.h>
60 #include <sys/vmsystm.h>
61 #include <sys/membar.h>
62 #include <sys/mem.h>
63 
64 /*
65  * Function prototypes
66  */
67 static int mc_open(dev_t *, int, int, cred_t *);
68 static int mc_close(dev_t, int, int, cred_t *);
69 static int mc_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
70 static int mc_attach(dev_info_t *, ddi_attach_cmd_t);
71 static int mc_detach(dev_info_t *, ddi_detach_cmd_t);
72 
73 static int mc_poll_init(void);
74 static void mc_poll_fini(void);
75 static int mc_board_add(mc_opl_t *mcp);
76 static int mc_board_del(mc_opl_t *mcp);
77 static int mc_suspend(mc_opl_t *mcp, uint32_t flag);
78 static int mc_resume(mc_opl_t *mcp, uint32_t flag);
79 int opl_mc_suspend(void);
80 int opl_mc_resume(void);
81 
82 static void insert_mcp(mc_opl_t *mcp);
83 static void delete_mcp(mc_opl_t *mcp);
84 
85 static int pa_to_maddr(mc_opl_t *mcp, uint64_t pa, mc_addr_t *maddr);
86 
87 static int mc_rangecheck_pa(mc_opl_t *mcp, uint64_t pa);
88 
89 int mc_get_mem_unum(int, uint64_t, char *, int, int *);
90 int mc_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *paddr);
91 int mc_get_mem_offset(uint64_t paddr, uint64_t *offp);
92 int mc_get_mem_sid(char *unum, char *buf, int buflen, int *lenp);
93 int mc_get_mem_sid_dimm(mc_opl_t *mcp, char *dname, char *buf,
94     int buflen, int *lenp);
95 mc_dimm_info_t *mc_get_dimm_list(mc_opl_t *mcp);
96 mc_dimm_info_t *mc_prepare_dimmlist(board_dimm_info_t *bd_dimmp);
97 int mc_set_mem_sid(mc_opl_t *mcp, char *buf, int buflen, int lsb, int bank,
98     uint32_t mf_type, uint32_t d_slot);
99 static void mc_free_dimm_list(mc_dimm_info_t *d);
100 static void mc_get_mlist(mc_opl_t *);
101 static void mc_polling(void);
102 static int mc_opl_get_physical_board(int);
103 
104 static void mc_clear_rewrite(mc_opl_t *mcp, int i);
105 static void mc_set_rewrite(mc_opl_t *mcp, int bank, uint32_t addr, int state);
106 static int mc_scf_log_event(mc_flt_page_t *flt_pag);
107 
108 #ifdef	DEBUG
109 static int mc_ioctl_debug(dev_t, int, intptr_t, int, cred_t *, int *);
110 void mc_dump_dimm(char *buf, int dnamesz, int serialsz, int partnumsz);
111 void mc_dump_dimm_info(board_dimm_info_t *bd_dimmp);
112 #endif
113 
114 #pragma weak opl_get_physical_board
115 extern int opl_get_physical_board(int);
116 extern int plat_max_boards(void);
117 
118 /*
119  * Configuration data structures
120  */
121 static struct cb_ops mc_cb_ops = {
122 	mc_open,			/* open */
123 	mc_close,			/* close */
124 	nulldev,			/* strategy */
125 	nulldev,			/* print */
126 	nodev,				/* dump */
127 	nulldev,			/* read */
128 	nulldev,			/* write */
129 	mc_ioctl,			/* ioctl */
130 	nodev,				/* devmap */
131 	nodev,				/* mmap */
132 	nodev,				/* segmap */
133 	nochpoll,			/* poll */
134 	ddi_prop_op,			/* cb_prop_op */
135 	0,				/* streamtab */
136 	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
137 	CB_REV,				/* rev */
138 	nodev,				/* cb_aread */
139 	nodev				/* cb_awrite */
140 };
141 
142 static struct dev_ops mc_ops = {
143 	DEVO_REV,			/* rev */
144 	0,				/* refcnt  */
145 	ddi_getinfo_1to1,		/* getinfo */
146 	nulldev,			/* identify */
147 	nulldev,			/* probe */
148 	mc_attach,			/* attach */
149 	mc_detach,			/* detach */
150 	nulldev,			/* reset */
151 	&mc_cb_ops,			/* cb_ops */
152 	(struct bus_ops *)0,		/* bus_ops */
153 	nulldev,			/* power */
154 	ddi_quiesce_not_needed,			/* quiesce */
155 };
156 
157 /*
158  * Driver globals
159  */
160 
161 static enum {
162 	MODEL_FF1,
163 	MODEL_FF2,
164 	MODEL_DC,
165 	MODEL_IKKAKU
166 } plat_model = MODEL_DC;	/* The default behaviour is DC */
167 
168 static struct plat_model_names {
169 	const char *unit_name;
170 	const char *mem_name;
171 } model_names[] = {
172 	{ "MBU_A", "MEMB" },
173 	{ "MBU_B", "MEMB" },
174 	{ "CMU", "" },
175 	{ "MBU_A", "" }
176 };
177 
178 /*
179  * The DIMM Names for DC platform.
180  * The index into this table is made up of (bank, dslot),
181  * where dslot occupies bits 0-1 and bank occupies bits 2-4.
182  */
183 static char *mc_dc_dimm_unum_table[OPL_MAX_DIMMS] = {
184 	/* --------CMUnn----------- */
185 	/* --CS0-----|--CS1------ */
186 	/* -H-|--L-- | -H- | -L-- */
187 	"03A", "02A", "03B", "02B", /* Bank 0 (MAC 0 bank 0) */
188 	"13A", "12A", "13B", "12B", /* Bank 1 (MAC 0 bank 1) */
189 	"23A", "22A", "23B", "22B", /* Bank 2 (MAC 1 bank 0) */
190 	"33A", "32A", "33B", "32B", /* Bank 3 (MAC 1 bank 1) */
191 	"01A", "00A", "01B", "00B", /* Bank 4 (MAC 2 bank 0) */
192 	"11A", "10A", "11B", "10B", /* Bank 5 (MAC 2 bank 1) */
193 	"21A", "20A", "21B", "20B", /* Bank 6 (MAC 3 bank 0) */
194 	"31A", "30A", "31B", "30B"  /* Bank 7 (MAC 3 bank 1) */
195 };
196 
197 /*
198  * The DIMM Names for FF1/FF2/IKKAKU platforms.
199  * The index into this table is made up of (board, bank, dslot),
200  * where dslot occupies bits 0-1, bank occupies bits 2-4, and
201  * board occupies bit 5.
202  */
203 static char *mc_ff_dimm_unum_table[2 * OPL_MAX_DIMMS] = {
204 	/* --------CMU0---------- */
205 	/* --CS0-----|--CS1------ */
206 	/* -H-|--L-- | -H- | -L-- */
207 	"03A", "02A", "03B", "02B", /* Bank 0 (MAC 0 bank 0) */
208 	"01A", "00A", "01B", "00B", /* Bank 1 (MAC 0 bank 1) */
209 	"13A", "12A", "13B", "12B", /* Bank 2 (MAC 1 bank 0) */
210 	"11A", "10A", "11B", "10B", /* Bank 3 (MAC 1 bank 1) */
211 	"23A", "22A", "23B", "22B", /* Bank 4 (MAC 2 bank 0) */
212 	"21A", "20A", "21B", "20B", /* Bank 5 (MAC 2 bank 1) */
213 	"33A", "32A", "33B", "32B", /* Bank 6 (MAC 3 bank 0) */
214 	"31A", "30A", "31B", "30B", /* Bank 7 (MAC 3 bank 1) */
215 	/* --------CMU1---------- */
216 	/* --CS0-----|--CS1------ */
217 	/* -H-|--L-- | -H- | -L-- */
218 	"43A", "42A", "43B", "42B", /* Bank 0 (MAC 0 bank 0) */
219 	"41A", "40A", "41B", "40B", /* Bank 1 (MAC 0 bank 1) */
220 	"53A", "52A", "53B", "52B", /* Bank 2 (MAC 1 bank 0) */
221 	"51A", "50A", "51B", "50B", /* Bank 3 (MAC 1 bank 1) */
222 	"63A", "62A", "63B", "62B", /* Bank 4 (MAC 2 bank 0) */
223 	"61A", "60A", "61B", "60B", /* Bank 5 (MAC 2 bank 1) */
224 	"73A", "72A", "73B", "72B", /* Bank 6 (MAC 3 bank 0) */
225 	"71A", "70A", "71B", "70B"  /* Bank 7 (MAC 3 bank 1) */
226 };
227 
228 #define	BD_BK_SLOT_TO_INDEX(bd, bk, s)			\
229 	(((bd & 0x01) << 5) | ((bk & 0x07) << 2) | (s & 0x03))
230 
231 #define	INDEX_TO_BANK(i)			(((i) & 0x1C) >> 2)
232 #define	INDEX_TO_SLOT(i)			((i) & 0x03)
233 
234 #define	SLOT_TO_CS(slot)	((slot & 0x3) >> 1)
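/*
 * Illustrative note (added annotation, not part of the original source):
 * with the FF table above, BD_BK_SLOT_TO_INDEX(1, 3, 2) =
 * ((1 << 5) | (3 << 2) | 2) = 46, which selects
 * mc_ff_dimm_unum_table[46] == "51B"; INDEX_TO_BANK(46) == 3 and
 * INDEX_TO_SLOT(46) == 2 recover the bank and slot fields.
 */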
235 
236 /* Isolation unit size is 64 MB */
237 #define	MC_ISOLATION_BSIZE	(64 * 1024 * 1024)
238 
239 #define	MC_MAX_SPEEDS 7
240 
241 typedef struct {
242 	uint32_t mc_speeds;
243 	uint32_t mc_period;
244 } mc_scan_speed_t;
245 
246 #define	MC_CNTL_SPEED_SHIFT 26
247 
248 /*
249  * In mirror mode, we normalize the bank idx to "even" since
250  * the HW treats the mirrored pair as one unit w.r.t. programming.
251  * This bank index will be the "effective" bank index.
252  * All mirrored bank state info on mc_period, mc_speedup_period
253  * will be stored in the even bank structure to avoid code duplication.
254  */
255 #define	MIRROR_IDX(bankidx)	(bankidx & ~1)
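/*
 * Example (added annotation): banks 4 and 5 form a mirrored pair, and
 * MIRROR_IDX(4) == MIRROR_IDX(5) == 4, so the pair's scan state is kept
 * once, in the even bank's structure.
 */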
256 
257 static mc_scan_speed_t	mc_scan_speeds[MC_MAX_SPEEDS] = {
258 	{0x6 << MC_CNTL_SPEED_SHIFT, 0},
259 	{0x5 << MC_CNTL_SPEED_SHIFT, 32},
260 	{0x4 << MC_CNTL_SPEED_SHIFT, 64},
261 	{0x3 << MC_CNTL_SPEED_SHIFT, 128},
262 	{0x2 << MC_CNTL_SPEED_SHIFT, 256},
263 	{0x1 << MC_CNTL_SPEED_SHIFT, 512},
264 	{0x0 << MC_CNTL_SPEED_SHIFT, 1024}
265 };
266 
267 static uint32_t	mc_max_speed = (0x6 << 26);
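/*
 * Annotation (not in the original source): the scan-speed field occupies
 * PTRL_CNTL bits [28:26] (MC_CNTL_SPEED_SHIFT == 26).  The first table
 * entry, 0x6 << 26 == 0x18000000, is the value also used as mc_max_speed
 * above and appears to be the fastest setting; later entries pair slower
 * speed codes with progressively longer mc_period values.
 */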
268 
269 int mc_isolation_bsize = MC_ISOLATION_BSIZE;
270 int mc_patrol_interval_sec = MC_PATROL_INTERVAL_SEC;
271 int mc_max_scf_retry = 16;
272 int mc_max_scf_logs = 64;
273 int mc_max_errlog_processed = BANKNUM_PER_SB*2;
274 int mc_scan_period = 12 * 60 * 60;	/* 12 hours period */
275 int mc_max_rewrite_loop = 100;
276 int mc_rewrite_delay = 10;
277 /*
278  * it takes SCF about 300 ms to process a request.  We can bail out
279  * if it is busy.  It does not pay to wait for it too long.
280  */
281 int mc_max_scf_loop = 2;
282 int mc_scf_delay = 100;
283 int mc_pce_dropped = 0;
284 int mc_poll_priority = MINCLSYSPRI;
285 int mc_max_rewrite_retry = 6 * 60;
286 
287 
288 /*
289  * Mutex hierarchy in mc-opl
290  * If both mcmutex and mc_lock must be held,
291  * mcmutex must be acquired first, and then mc_lock.
292  */
293 
294 static kmutex_t mcmutex;
295 mc_opl_t *mc_instances[OPL_MAX_BOARDS];
296 
297 static kmutex_t mc_polling_lock;
298 static kcondvar_t mc_polling_cv;
299 static kcondvar_t mc_poll_exit_cv;
300 static int mc_poll_cmd = 0;
301 static int mc_pollthr_running = 0;
302 int mc_timeout_period = 0; /* polling interval, in clock ticks */
303 void *mc_statep;
304 
305 #ifdef	DEBUG
306 int oplmc_debug = 0;
307 #endif
308 
309 static int mc_debug_show_all = 0;
310 
311 extern struct mod_ops mod_driverops;
312 
313 static struct modldrv modldrv = {
314 	&mod_driverops,			/* module type, this one is a driver */
315 	"OPL Memory-controller",	/* module name */
316 	&mc_ops,			/* driver ops */
317 };
318 
319 static struct modlinkage modlinkage = {
320 	MODREV_1,		/* rev */
321 	(void *)&modldrv,
322 	NULL
323 };
324 
325 #pragma weak opl_get_mem_unum
326 #pragma weak opl_get_mem_sid
327 #pragma weak opl_get_mem_offset
328 #pragma weak opl_get_mem_addr
329 
330 extern int (*opl_get_mem_unum)(int, uint64_t, char *, int, int *);
331 extern int (*opl_get_mem_sid)(char *unum, char *buf, int buflen, int *lenp);
332 extern int (*opl_get_mem_offset)(uint64_t paddr, uint64_t *offp);
333 extern int (*opl_get_mem_addr)(char *unum, char *sid, uint64_t offset,
334     uint64_t *paddr);
335 
336 
337 /*
338  * pseudo-mc node portid format
339  *
340  *		[10]   = 0
341  *		[9]    = 1
342  *		[8]    = LSB_ID[4] = 0
343  *		[7:4]  = LSB_ID[3:0]
344  *		[3:0]  = 0
345  *
346  */
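/*
 * Worked example (added annotation, derived from the layout above): for
 * LSB_ID 3 the portid would be (1 << 9) | (3 << 4) = 0x230, since bit 9
 * is always 1, LSB_ID[3:0] occupies bits [7:4], and bits [3:0] are zero.
 */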
347 
348 /*
349  * These are the module initialization routines.
350  */
351 int
352 _init(void)
353 {
354 	int	error;
355 	int	plen;
356 	char	model[20];
357 	pnode_t	node;
358 
359 
360 	if ((error = ddi_soft_state_init(&mc_statep,
361 	    sizeof (mc_opl_t), 1)) != 0)
362 		return (error);
363 
364 	if ((error = mc_poll_init()) != 0) {
365 		ddi_soft_state_fini(&mc_statep);
366 		return (error);
367 	}
368 
369 	mutex_init(&mcmutex, NULL, MUTEX_DRIVER, NULL);
370 	if (&opl_get_mem_unum)
371 		opl_get_mem_unum = mc_get_mem_unum;
372 	if (&opl_get_mem_sid)
373 		opl_get_mem_sid = mc_get_mem_sid;
374 	if (&opl_get_mem_offset)
375 		opl_get_mem_offset = mc_get_mem_offset;
376 	if (&opl_get_mem_addr)
377 		opl_get_mem_addr = mc_get_mem_addr;
378 
379 	node = prom_rootnode();
380 	plen = prom_getproplen(node, "model");
381 
382 	if (plen > 0 && plen < sizeof (model)) {
383 		(void) prom_getprop(node, "model", model);
384 		model[plen] = '\0';
385 		if (strcmp(model, "FF1") == 0)
386 			plat_model = MODEL_FF1;
387 		else if (strcmp(model, "FF2") == 0)
388 			plat_model = MODEL_FF2;
389 		else if (strncmp(model, "DC", 2) == 0)
390 			plat_model = MODEL_DC;
391 		else if (strcmp(model, "IKKAKU") == 0)
392 			plat_model = MODEL_IKKAKU;
393 	}
394 
395 	error =  mod_install(&modlinkage);
396 	if (error != 0) {
397 		if (&opl_get_mem_unum)
398 			opl_get_mem_unum = NULL;
399 		if (&opl_get_mem_sid)
400 			opl_get_mem_sid = NULL;
401 		if (&opl_get_mem_offset)
402 			opl_get_mem_offset = NULL;
403 		if (&opl_get_mem_addr)
404 			opl_get_mem_addr = NULL;
405 		mutex_destroy(&mcmutex);
406 		mc_poll_fini();
407 		ddi_soft_state_fini(&mc_statep);
408 	}
409 	return (error);
410 }
411 
412 int
413 _fini(void)
414 {
415 	int error;
416 
417 	if ((error = mod_remove(&modlinkage)) != 0)
418 		return (error);
419 
420 	if (&opl_get_mem_unum)
421 		opl_get_mem_unum = NULL;
422 	if (&opl_get_mem_sid)
423 		opl_get_mem_sid = NULL;
424 	if (&opl_get_mem_offset)
425 		opl_get_mem_offset = NULL;
426 	if (&opl_get_mem_addr)
427 		opl_get_mem_addr = NULL;
428 
429 	mutex_destroy(&mcmutex);
430 	mc_poll_fini();
431 	ddi_soft_state_fini(&mc_statep);
432 
433 	return (0);
434 }
435 
436 int
437 _info(struct modinfo *modinfop)
438 {
439 	return (mod_info(&modlinkage, modinfop));
440 }
441 
442 static void
443 mc_polling_thread()
444 {
445 	mutex_enter(&mc_polling_lock);
446 	mc_pollthr_running = 1;
447 	while (!(mc_poll_cmd & MC_POLL_EXIT)) {
448 		mc_polling();
449 		(void) cv_reltimedwait(&mc_polling_cv, &mc_polling_lock,
450 		    mc_timeout_period, TR_CLOCK_TICK);
451 	}
452 	mc_pollthr_running = 0;
453 
454 	/*
455 	 * signal if any one is waiting for this thread to exit.
456 	 */
457 	cv_signal(&mc_poll_exit_cv);
458 	mutex_exit(&mc_polling_lock);
459 	thread_exit();
460 	/* NOTREACHED */
461 }
462 
463 static int
464 mc_poll_init()
465 {
466 	mutex_init(&mc_polling_lock, NULL, MUTEX_DRIVER, NULL);
467 	cv_init(&mc_polling_cv, NULL, CV_DRIVER, NULL);
468 	cv_init(&mc_poll_exit_cv, NULL, CV_DRIVER, NULL);
469 	return (0);
470 }
471 
472 static void
473 mc_poll_fini()
474 {
475 	mutex_enter(&mc_polling_lock);
476 	if (mc_pollthr_running) {
477 		mc_poll_cmd = MC_POLL_EXIT;
478 		cv_signal(&mc_polling_cv);
479 		while (mc_pollthr_running) {
480 			cv_wait(&mc_poll_exit_cv, &mc_polling_lock);
481 		}
482 	}
483 	mutex_exit(&mc_polling_lock);
484 	mutex_destroy(&mc_polling_lock);
485 	cv_destroy(&mc_polling_cv);
486 	cv_destroy(&mc_poll_exit_cv);
487 }
488 
489 static int
490 mc_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
491 {
492 	mc_opl_t *mcp;
493 	int instance;
494 	int rv;
495 
496 	/* get the instance of this devi */
497 	instance = ddi_get_instance(devi);
498 
499 	switch (cmd) {
500 	case DDI_ATTACH:
501 		break;
502 	case DDI_RESUME:
503 		mcp = ddi_get_soft_state(mc_statep, instance);
504 		rv = mc_resume(mcp, MC_DRIVER_SUSPENDED);
505 		return (rv);
506 	default:
507 		return (DDI_FAILURE);
508 	}
509 
510 	if (ddi_soft_state_zalloc(mc_statep, instance) != DDI_SUCCESS)
511 		return (DDI_FAILURE);
512 
513 	if (ddi_create_minor_node(devi, "mc-opl", S_IFCHR, instance,
514 	    "ddi_mem_ctrl", 0) != DDI_SUCCESS) {
515 		MC_LOG("mc_attach: create_minor_node failed\n");
516 		return (DDI_FAILURE);
517 	}
518 
519 	if ((mcp = ddi_get_soft_state(mc_statep, instance)) == NULL) {
520 		goto bad;
521 	}
522 
523 	if (mc_timeout_period == 0) {
524 		mc_patrol_interval_sec = (int)ddi_getprop(DDI_DEV_T_ANY, devi,
525 		    DDI_PROP_DONTPASS, "mc-timeout-interval-sec",
526 		    mc_patrol_interval_sec);
527 		mc_timeout_period = drv_usectohz(1000000 *
528 		    mc_patrol_interval_sec / OPL_MAX_BOARDS);
529 	}
530 
531 	/* record information in the mc state */
532 	mcp->mc_dip = devi;
533 
534 	if (mc_board_add(mcp))
535 		goto bad;
536 
537 	insert_mcp(mcp);
538 
539 	/*
540 	 * Start the polling thread if it is not running already.
541 	 */
542 	mutex_enter(&mc_polling_lock);
543 	if (!mc_pollthr_running) {
544 		(void) thread_create(NULL, 0, (void (*)())mc_polling_thread,
545 		    NULL, 0, &p0, TS_RUN, mc_poll_priority);
546 	}
547 	mutex_exit(&mc_polling_lock);
548 	ddi_report_dev(devi);
549 
550 	return (DDI_SUCCESS);
551 
552 bad:
553 	ddi_remove_minor_node(devi, NULL);
554 	ddi_soft_state_free(mc_statep, instance);
555 	return (DDI_FAILURE);
556 }
557 
558 /* ARGSUSED */
559 static int
560 mc_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
561 {
562 	int rv;
563 	int instance;
564 	mc_opl_t *mcp;
565 
566 	/* get the instance of this devi */
567 	instance = ddi_get_instance(devi);
568 	if ((mcp = ddi_get_soft_state(mc_statep, instance)) == NULL) {
569 		return (DDI_FAILURE);
570 	}
571 
572 	switch (cmd) {
573 	case DDI_SUSPEND:
574 		rv = mc_suspend(mcp, MC_DRIVER_SUSPENDED);
575 		return (rv);
576 	case DDI_DETACH:
577 		break;
578 	default:
579 		return (DDI_FAILURE);
580 	}
581 
582 	delete_mcp(mcp);
583 	if (mc_board_del(mcp) != DDI_SUCCESS) {
584 		return (DDI_FAILURE);
585 	}
586 
587 	ddi_remove_minor_node(devi, NULL);
588 
589 	/* free up the soft state */
590 	ddi_soft_state_free(mc_statep, instance);
591 
592 	return (DDI_SUCCESS);
593 }
594 
595 /* ARGSUSED */
596 static int
597 mc_open(dev_t *devp, int flag, int otyp, cred_t *credp)
598 {
599 	return (0);
600 }
601 
602 /* ARGSUSED */
603 static int
604 mc_close(dev_t devp, int flag, int otyp, cred_t *credp)
605 {
606 	return (0);
607 }
608 
609 /* ARGSUSED */
610 static int
611 mc_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
612 	int *rvalp)
613 {
614 	mc_flt_page_t flt_page;
615 
616 	if (cmd == MCIOC_FAULT_PAGE) {
617 		if (arg == NULL)
618 			return (EINVAL);
619 
620 		if (ddi_copyin((const void *)arg, (void *)&flt_page,
621 		    sizeof (mc_flt_page_t), 0) < 0)
622 			return (EFAULT);
623 
624 		return (mc_scf_log_event(&flt_page));
625 	}
626 #ifdef DEBUG
627 	return (mc_ioctl_debug(dev, cmd, arg, mode, credp, rvalp));
628 #else
629 	return (ENOTTY);
630 #endif
631 }
632 
633 /*
634  * PA validity check:
635  * This function returns 1 if the PA is a valid PA
636  * in the running Solaris instance, i.e. it is in physinstall.
637  * Otherwise, it returns 0.
638  */
639 
640 /* ARGSUSED */
641 static int
642 pa_is_valid(mc_opl_t *mcp, uint64_t addr)
643 {
644 	if (mcp->mlist == NULL)
645 		mc_get_mlist(mcp);
646 
647 	if (mcp->mlist && address_in_memlist(mcp->mlist, addr, 0)) {
648 		return (1);
649 	}
650 	return (0);
651 }
652 
653 /*
654  * mac-pa translation routines.
655  *
656  *    Input: mc driver state, (LSB#, Bank#, DIMM address)
657  *    Output: physical address
658  *
659  *    Valid   - return value:  0
660  *    Invalid - return value: -1
661  */
662 static int
663 mcaddr_to_pa(mc_opl_t *mcp, mc_addr_t *maddr, uint64_t *pa)
664 {
665 	int i;
666 	uint64_t pa_offset = 0;
667 	int cs = (maddr->ma_dimm_addr >> CS_SHIFT) & 1;
668 	int bank = maddr->ma_bank;
669 	mc_addr_t maddr1;
670 	int bank0, bank1;
671 
672 	MC_LOG("mcaddr /LSB%d/B%d/%x\n", maddr->ma_bd, bank,
673 	    maddr->ma_dimm_addr);
674 
675 	/* loc validity check */
676 	ASSERT(maddr->ma_bd >= 0 && OPL_BOARD_MAX > maddr->ma_bd);
677 	ASSERT(bank >= 0 && OPL_BANK_MAX > bank);
678 
679 	/* Do translation */
680 	for (i = 0; i < PA_BITS_FOR_MAC; i++) {
681 		int pa_bit = 0;
682 		int mc_bit = mcp->mc_trans_table[cs][i];
683 		if (mc_bit < MC_ADDRESS_BITS) {
684 			pa_bit = (maddr->ma_dimm_addr >> mc_bit) & 1;
685 		} else if (mc_bit == MP_NONE) {
686 			pa_bit = 0;
687 		} else if (mc_bit == MP_BANK_0) {
688 			pa_bit = bank & 1;
689 		} else if (mc_bit == MP_BANK_1) {
690 			pa_bit = (bank >> 1) & 1;
691 		} else if (mc_bit == MP_BANK_2) {
692 			pa_bit = (bank >> 2) & 1;
693 		}
694 		pa_offset |= ((uint64_t)pa_bit) << i;
695 	}
696 	*pa = mcp->mc_start_address + pa_offset;
697 	MC_LOG("pa = %lx\n", *pa);
698 
699 	if (pa_to_maddr(mcp, *pa, &maddr1) == -1) {
700 		cmn_err(CE_WARN, "mcaddr_to_pa: /LSB%d/B%d/%x failed to "
701 		    "convert PA %lx\n", maddr->ma_bd, bank,
702 		    maddr->ma_dimm_addr, *pa);
703 		return (-1);
704 	}
705 
706 	/*
707 	 * In mirror mode, PA is always translated to the even bank.
708 	 */
709 	if (IS_MIRROR(mcp, maddr->ma_bank)) {
710 		bank0 = maddr->ma_bank & ~(1);
711 		bank1 = maddr1.ma_bank & ~(1);
712 	} else {
713 		bank0 = maddr->ma_bank;
714 		bank1 = maddr1.ma_bank;
715 	}
716 	/*
717 	 * there is no need to check ma_bd because it is generated from
718 	 * mcp.  They are the same.
719 	 */
720 	if ((bank0 == bank1) && (maddr->ma_dimm_addr ==
721 	    maddr1.ma_dimm_addr)) {
722 		return (0);
723 	} else {
724 		MC_LOG("Translation error source /LSB%d/B%d/%x, "
725 		    "PA %lx, target /LSB%d/B%d/%x\n", maddr->ma_bd, bank,
726 		    maddr->ma_dimm_addr, *pa, maddr1.ma_bd, maddr1.ma_bank,
727 		    maddr1.ma_dimm_addr);
728 		return (-1);
729 	}
730 }
731 
732 /*
733  * PA to CS (used by pa_to_maddr).
734  */
735 static int
736 pa_to_cs(mc_opl_t *mcp, uint64_t pa_offset)
737 {
738 	int i;
739 	int cs = 1;
740 
741 	for (i = 0; i < PA_BITS_FOR_MAC; i++) {
742 		/* MAC address bit<29> is arranged on the same PA bit */
743 		/* in both tables, so we may use either table. */
744 		if (mcp->mc_trans_table[0][i] == CS_SHIFT) {
745 			cs = (pa_offset >> i) & 1;
746 			break;
747 		}
748 	}
749 	return (cs);
750 }
751 
752 /*
753  * PA to DIMM (used by pa_to_maddr).
754  */
755 /* ARGSUSED */
756 static uint32_t
757 pa_to_dimm(mc_opl_t *mcp, uint64_t pa_offset)
758 {
759 	int i;
760 	int cs = pa_to_cs(mcp, pa_offset);
761 	uint32_t dimm_addr = 0;
762 
763 	for (i = 0; i < PA_BITS_FOR_MAC; i++) {
764 		int pa_bit_value = (pa_offset >> i) & 1;
765 		int mc_bit = mcp->mc_trans_table[cs][i];
766 		if (mc_bit < MC_ADDRESS_BITS) {
767 			dimm_addr |= pa_bit_value << mc_bit;
768 		}
769 	}
770 	dimm_addr |= cs << CS_SHIFT;
771 	return (dimm_addr);
772 }
773 
774 /*
775  * PA to Bank (used by pa_to_maddr).
776  */
777 static int
778 pa_to_bank(mc_opl_t *mcp, uint64_t pa_offset)
779 {
780 	int i;
781 	int cs = pa_to_cs(mcp, pa_offset);
782 	int bankno = mcp->mc_trans_table[cs][INDEX_OF_BANK_SUPPLEMENT_BIT];
783 
784 
785 	for (i = 0; i < PA_BITS_FOR_MAC; i++) {
786 		int pa_bit_value = (pa_offset >> i) & 1;
787 		int mc_bit = mcp->mc_trans_table[cs][i];
788 		switch (mc_bit) {
789 		case MP_BANK_0:
790 			bankno |= pa_bit_value;
791 			break;
792 		case MP_BANK_1:
793 			bankno |= pa_bit_value << 1;
794 			break;
795 		case MP_BANK_2:
796 			bankno |= pa_bit_value << 2;
797 			break;
798 		}
799 	}
800 
801 	return (bankno);
802 }
803 
804 /*
805  * PA to MAC address translation
806  *
807  *   Input: MAC driver state, physical address
808  *   Output: LSB#, Bank id, mac address
809  *
810  *    Valid   - return value:  0
811  *    Invalid - return value: -1
812  */
813 
814 int
815 pa_to_maddr(mc_opl_t *mcp, uint64_t pa, mc_addr_t *maddr)
816 {
817 	uint64_t pa_offset;
818 
819 	if (!mc_rangecheck_pa(mcp, pa))
820 		return (-1);
821 
822 	/* Do translation */
823 	pa_offset = pa - mcp->mc_start_address;
824 
825 	maddr->ma_bd = mcp->mc_board_num;
826 	maddr->ma_phys_bd = mcp->mc_phys_board_num;
827 	maddr->ma_bank = pa_to_bank(mcp, pa_offset);
828 	maddr->ma_dimm_addr = pa_to_dimm(mcp, pa_offset);
829 	MC_LOG("pa %lx -> mcaddr /LSB%d/B%d/%x\n", pa_offset, maddr->ma_bd,
830 	    maddr->ma_bank, maddr->ma_dimm_addr);
831 	return (0);
832 }
833 
834 /*
835  * UNUM format for DC is "/CMUnn/MEMxyZ", where
836  *	nn = 00..03 for DC1 and 00..07 for DC2 and 00..15 for DC3.
837  *	x = MAC 0..3
838  *	y = 0..3 (slot info).
839  *	Z = 'A' or 'B'
840  *
841  * UNUM format for FF1 is "/MBU_A/MEMBx/MEMyZ", where
842  *	x = 0..3 (MEMB number)
843  *	y = 0..3 (slot info).
844  *	Z = 'A' or 'B'
845  *
846  * UNUM format for FF2 is "/MBU_B/MEMBx/MEMyZ", where
847  *	x = 0..7 (MEMB number)
848  *	y = 0..3 (slot info).
849  *	Z = 'A' or 'B'
850  *
851  * UNUM format for IKKAKU is "/MBU_A/MEMyZ", where
852  *	y = 0..3 (slot info).
853  *	Z = 'A' or 'B'
854  *
855  */
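/*
 * Worked example (added annotation): on a DC system, a permanent CE on
 * LSB 2, bank 2, slot 1 gives BD_BK_SLOT_TO_INDEX(0, 2, 1) == 9, so
 * mc_dc_dimm_unum_table[9] == "22A" and mc_set_mem_unum() below would
 * format the unum as "/CMU02/MEM22A".
 */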
856 int
857 mc_set_mem_unum(char *buf, int buflen, int sb, int bank,
858     uint32_t mf_type, uint32_t d_slot)
859 {
860 	char *dimmnm;
861 	char memb_num;
862 	int cs;
863 	int i;
864 	int j;
865 
866 	cs = SLOT_TO_CS(d_slot);
867 
868 	switch (plat_model) {
869 	case MODEL_DC:
870 		if (mf_type == FLT_TYPE_INTERMITTENT_CE ||
871 		    mf_type == FLT_TYPE_PERMANENT_CE) {
872 			i = BD_BK_SLOT_TO_INDEX(0, bank, d_slot);
873 			dimmnm = mc_dc_dimm_unum_table[i];
874 			(void) snprintf(buf, buflen, "/%s%02d/MEM%s",
875 			    model_names[plat_model].unit_name, sb, dimmnm);
876 		} else {
877 			i = BD_BK_SLOT_TO_INDEX(0, bank, 0);
878 			j = (cs == 0) ?  i : i + 2;
879 			(void) snprintf(buf, buflen, "/%s%02d/MEM%s MEM%s",
880 			    model_names[plat_model].unit_name, sb,
881 			    mc_dc_dimm_unum_table[j],
882 			    mc_dc_dimm_unum_table[j + 1]);
883 		}
884 		break;
885 	case MODEL_FF1:
886 	case MODEL_FF2:
887 		if (mf_type == FLT_TYPE_INTERMITTENT_CE ||
888 		    mf_type == FLT_TYPE_PERMANENT_CE) {
889 			i = BD_BK_SLOT_TO_INDEX(sb, bank, d_slot);
890 			dimmnm = mc_ff_dimm_unum_table[i];
891 			memb_num = dimmnm[0];
892 			(void) snprintf(buf, buflen, "/%s/%s%c/MEM%s",
893 			    model_names[plat_model].unit_name,
894 			    model_names[plat_model].mem_name,
895 			    memb_num, &dimmnm[1]);
896 		} else {
897 			i = BD_BK_SLOT_TO_INDEX(sb, bank, 0);
898 			j = (cs == 0) ?  i : i + 2;
899 			memb_num = mc_ff_dimm_unum_table[i][0],
900 			    (void) snprintf(buf, buflen, "/%s/%s%c/MEM%s MEM%s",
901 			    model_names[plat_model].unit_name,
902 			    model_names[plat_model].mem_name, memb_num,
903 			    &mc_ff_dimm_unum_table[j][1],
904 			    &mc_ff_dimm_unum_table[j + 1][1]);
905 		}
906 		break;
907 	case MODEL_IKKAKU:
908 		if (mf_type == FLT_TYPE_INTERMITTENT_CE ||
909 		    mf_type == FLT_TYPE_PERMANENT_CE) {
910 			i = BD_BK_SLOT_TO_INDEX(sb, bank, d_slot);
911 			dimmnm = mc_ff_dimm_unum_table[i];
912 			(void) snprintf(buf, buflen, "/%s/MEM%s",
913 			    model_names[plat_model].unit_name, &dimmnm[1]);
914 		} else {
915 			i = BD_BK_SLOT_TO_INDEX(sb, bank, 0);
916 			j = (cs == 0) ?  i : i + 2;
917 			memb_num = mc_ff_dimm_unum_table[i][0],
918 			    (void) snprintf(buf, buflen, "/%s/MEM%s MEM%s",
919 			    model_names[plat_model].unit_name,
920 			    &mc_ff_dimm_unum_table[j][1],
921 			    &mc_ff_dimm_unum_table[j + 1][1]);
922 		}
923 		break;
924 	default:
925 		return (-1);
926 	}
927 	return (0);
928 }
929 
930 static void
931 mc_ereport_post(mc_aflt_t *mc_aflt)
932 {
933 	char buf[FM_MAX_CLASS];
934 	char device_path[MAXPATHLEN];
935 	char sid[MAXPATHLEN];
936 	nv_alloc_t *nva = NULL;
937 	nvlist_t *ereport, *detector, *resource;
938 	errorq_elem_t *eqep;
939 	int nflts;
940 	mc_flt_stat_t *flt_stat;
941 	int i, n;
942 	int blen = MAXPATHLEN;
943 	char *p, *s = NULL;
944 	uint32_t values[2], synd[2], dslot[2];
945 	uint64_t offset = (uint64_t)-1;
946 	int ret = -1;
947 
948 	if (panicstr) {
949 		eqep = errorq_reserve(ereport_errorq);
950 		if (eqep == NULL)
951 			return;
952 		ereport = errorq_elem_nvl(ereport_errorq, eqep);
953 		nva = errorq_elem_nva(ereport_errorq, eqep);
954 	} else {
955 		ereport = fm_nvlist_create(nva);
956 	}
957 
958 	/*
959 	 * Create the scheme "dev" FMRI.
960 	 */
961 	detector = fm_nvlist_create(nva);
962 	resource = fm_nvlist_create(nva);
963 
964 	nflts = mc_aflt->mflt_nflts;
965 
966 	ASSERT(nflts >= 1 && nflts <= 2);
967 
968 	flt_stat = mc_aflt->mflt_stat[0];
969 	(void) ddi_pathname(mc_aflt->mflt_mcp->mc_dip, device_path);
970 	(void) fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
971 	    device_path, NULL);
972 
973 	/*
974 	 * Encode all the common data into the ereport.
975 	 */
976 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s", MC_OPL_ERROR_CLASS,
977 	    mc_aflt->mflt_is_ptrl ? MC_OPL_PTRL_SUBCLASS : MC_OPL_MI_SUBCLASS,
978 	    mc_aflt->mflt_erpt_class);
979 
980 	MC_LOG("mc_ereport_post: ereport %s\n", buf);
981 
982 
983 	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
984 	    fm_ena_generate(mc_aflt->mflt_id, FM_ENA_FMT1), detector, NULL);
985 
986 	/*
987 	 * Set payload.
988 	 */
989 	fm_payload_set(ereport, MC_OPL_BOARD, DATA_TYPE_UINT32,
990 	    flt_stat->mf_flt_maddr.ma_bd, NULL);
991 
992 	fm_payload_set(ereport, MC_OPL_PA, DATA_TYPE_UINT64,
993 	    flt_stat->mf_flt_paddr, NULL);
994 
995 	if (flt_stat->mf_type == FLT_TYPE_INTERMITTENT_CE ||
996 	    flt_stat->mf_type == FLT_TYPE_PERMANENT_CE) {
997 		fm_payload_set(ereport, MC_OPL_FLT_TYPE, DATA_TYPE_UINT8,
998 		    ECC_STICKY, NULL);
999 	}
1000 
1001 	for (i = 0; i < nflts; i++)
1002 		values[i] = mc_aflt->mflt_stat[i]->mf_flt_maddr.ma_bank;
1003 
1004 	fm_payload_set(ereport, MC_OPL_BANK, DATA_TYPE_UINT32_ARRAY, nflts,
1005 	    values, NULL);
1006 
1007 	for (i = 0; i < nflts; i++)
1008 		values[i] = mc_aflt->mflt_stat[i]->mf_cntl;
1009 
1010 	fm_payload_set(ereport, MC_OPL_STATUS, DATA_TYPE_UINT32_ARRAY, nflts,
1011 	    values, NULL);
1012 
1013 	for (i = 0; i < nflts; i++)
1014 		values[i] = mc_aflt->mflt_stat[i]->mf_err_add;
1015 
1016 	/* offset is set only for PCE and ICE */
1017 	if (mc_aflt->mflt_stat[0]->mf_type == FLT_TYPE_INTERMITTENT_CE ||
1018 	    mc_aflt->mflt_stat[0]->mf_type == FLT_TYPE_PERMANENT_CE) {
1019 		offset = values[0];
1020 
1021 	}
1022 	fm_payload_set(ereport, MC_OPL_ERR_ADD, DATA_TYPE_UINT32_ARRAY, nflts,
1023 	    values, NULL);
1024 
1025 	for (i = 0; i < nflts; i++)
1026 		values[i] = mc_aflt->mflt_stat[i]->mf_err_log;
1027 
1028 	fm_payload_set(ereport, MC_OPL_ERR_LOG, DATA_TYPE_UINT32_ARRAY, nflts,
1029 	    values, NULL);
1030 
1031 	for (i = 0; i < nflts; i++) {
1032 		flt_stat = mc_aflt->mflt_stat[i];
1033 		if (flt_stat->mf_errlog_valid) {
1034 			synd[i] = flt_stat->mf_synd;
1035 			dslot[i] = flt_stat->mf_dimm_slot;
1036 			values[i] = flt_stat->mf_dram_place;
1037 		} else {
1038 			synd[i] = 0;
1039 			dslot[i] = 0;
1040 			values[i] = 0;
1041 		}
1042 	}
1043 
1044 	fm_payload_set(ereport, MC_OPL_ERR_SYND, DATA_TYPE_UINT32_ARRAY, nflts,
1045 	    synd, NULL);
1046 
1047 	fm_payload_set(ereport, MC_OPL_ERR_DIMMSLOT, DATA_TYPE_UINT32_ARRAY,
1048 	    nflts, dslot, NULL);
1049 
1050 	fm_payload_set(ereport, MC_OPL_ERR_DRAM, DATA_TYPE_UINT32_ARRAY, nflts,
1051 	    values, NULL);
1052 
1053 	device_path[0] = 0;
1054 	p = &device_path[0];
1055 	sid[0] = 0;
1056 	s = &sid[0];
1057 	ret = 0;
1058 
1059 	for (i = 0; i < nflts; i++) {
1060 		int bank;
1061 
1062 		flt_stat = mc_aflt->mflt_stat[i];
1063 		bank = flt_stat->mf_flt_maddr.ma_bank;
1064 		ret = mc_set_mem_unum(p + strlen(p), blen,
1065 		    flt_stat->mf_flt_maddr.ma_phys_bd, bank, flt_stat->mf_type,
1066 		    flt_stat->mf_dimm_slot);
1067 
1068 		if (ret != 0) {
1069 			cmn_err(CE_WARN,
1070 			    "mc_ereport_post: Failed to determine the unum "
1071 			    "for board=%d bank=%d type=0x%x slot=0x%x",
1072 			    flt_stat->mf_flt_maddr.ma_bd, bank,
1073 			    flt_stat->mf_type, flt_stat->mf_dimm_slot);
1074 			continue;
1075 		}
1076 		n = strlen(device_path);
1077 		blen = MAXPATHLEN - n;
1078 		p = &device_path[n];
1079 		if (i < (nflts - 1)) {
1080 			(void) snprintf(p, blen, " ");
1081 			blen--;
1082 			p++;
1083 		}
1084 
1085 		if (ret == 0) {
1086 			ret = mc_set_mem_sid(mc_aflt->mflt_mcp, s + strlen(s),
1087 			    blen, flt_stat->mf_flt_maddr.ma_phys_bd, bank,
1088 			    flt_stat->mf_type, flt_stat->mf_dimm_slot);
1089 
1090 		}
1091 	}
1092 
1093 	(void) fm_fmri_mem_set(resource, FM_MEM_SCHEME_VERSION, NULL,
1094 	    device_path, (ret == 0) ? sid : NULL, (ret == 0) ? offset :
1095 	    (uint64_t)-1);
1096 
1097 	fm_payload_set(ereport, MC_OPL_RESOURCE, DATA_TYPE_NVLIST, resource,
1098 	    NULL);
1099 
1100 	if (panicstr) {
1101 		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
1102 	} else {
1103 		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
1104 		fm_nvlist_destroy(ereport, FM_NVA_FREE);
1105 		fm_nvlist_destroy(detector, FM_NVA_FREE);
1106 		fm_nvlist_destroy(resource, FM_NVA_FREE);
1107 	}
1108 }
1109 
1110 
1111 static void
1112 mc_err_drain(mc_aflt_t *mc_aflt)
1113 {
1114 	int rv;
1115 	uint64_t pa = (uint64_t)(-1);
1116 	int i;
1117 
1118 	MC_LOG("mc_err_drain: %s\n", mc_aflt->mflt_erpt_class);
1119 	/*
1120 	 * we come here only when we have:
1121 	 * In mirror mode: MUE, SUE
1122 	 * In normal mode: UE, Permanent CE, Intermittent CE
1123 	 */
1124 	for (i = 0; i < mc_aflt->mflt_nflts; i++) {
1125 		rv = mcaddr_to_pa(mc_aflt->mflt_mcp,
1126 		    &(mc_aflt->mflt_stat[i]->mf_flt_maddr), &pa);
1127 
1128 		/* Ensure the pa is valid (not in isolated memory block) */
1129 		if (rv == 0 && pa_is_valid(mc_aflt->mflt_mcp, pa))
1130 			mc_aflt->mflt_stat[i]->mf_flt_paddr = pa;
1131 		else
1132 			mc_aflt->mflt_stat[i]->mf_flt_paddr = (uint64_t)-1;
1133 	}
1134 
1135 	MC_LOG("mc_err_drain:pa = %lx\n", pa);
1136 
1137 	switch (page_retire_check(pa, NULL)) {
1138 	case 0:
1139 	case EAGAIN:
1140 		MC_LOG("Page retired or pending\n");
1141 		return;
1142 	case EIO:
1143 		/*
1144 		 * Do page retirement except for the PCE and ICE cases.
1145 		 * This is taken care by the OPL DE
1146 		 * Those cases are taken care of by the OPL DE.
1147 		if (mc_aflt->mflt_stat[0]->mf_type !=
1148 		    FLT_TYPE_INTERMITTENT_CE &&
1149 		    mc_aflt->mflt_stat[0]->mf_type != FLT_TYPE_PERMANENT_CE) {
1150 			MC_LOG("offline page at pa %lx error %x\n", pa,
1151 			    mc_aflt->mflt_pr);
1152 			(void) page_retire(pa, mc_aflt->mflt_pr);
1153 		}
1154 		break;
1155 	case EINVAL:
1156 	default:
1157 		/*
1158 		 * Some memory does not have a page structure, so
1159 		 * we keep going in case of EINVAL.
1160 		 */
1161 		break;
1162 	}
1163 
1164 	for (i = 0; i < mc_aflt->mflt_nflts; i++) {
1165 		mc_aflt_t mc_aflt0;
1166 		if (mc_aflt->mflt_stat[i]->mf_flt_paddr != (uint64_t)-1) {
1167 			mc_aflt0 = *mc_aflt;
1168 			mc_aflt0.mflt_nflts = 1;
1169 			mc_aflt0.mflt_stat[0] = mc_aflt->mflt_stat[i];
1170 			mc_ereport_post(&mc_aflt0);
1171 		}
1172 	}
1173 }
1174 
1175 /*
1176  * The restart address is actually defined in units of PA[37:6];
1177  * the mac patrol will convert that to a dimm offset.  If the
1178  * address is not in the bank, it will continue to search for
1179  * the next PA that is within the bank.
1180  *
1181  * Also the mac patrol scans the dimms based on PA, not
1182  * dimm offset.
1183  */
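/*
 * Annotation (added, based on the comment above): since the restart
 * address is kept as PA[37:6], it has 64-byte granularity; a restart
 * register value of N corresponds to byte address N * 64.
 */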
1184 static int
1185 restart_patrol(mc_opl_t *mcp, int bank, mc_rsaddr_info_t *rsaddr_info)
1186 {
1187 	uint64_t pa;
1188 	int rv;
1189 
1190 	if (MC_REWRITE_MODE(mcp, bank)) {
1191 		return (0);
1192 	}
1193 	if (rsaddr_info == NULL || (rsaddr_info->mi_valid == 0)) {
1194 		MAC_PTRL_START(mcp, bank);
1195 		return (0);
1196 	}
1197 
1198 	rv = mcaddr_to_pa(mcp, &rsaddr_info->mi_restartaddr, &pa);
1199 	if (rv != 0) {
1200 		MC_LOG("cannot convert mcaddr to pa. use auto restart\n");
1201 		MAC_PTRL_START(mcp, bank);
1202 		return (0);
1203 	}
1204 
1205 	if (!mc_rangecheck_pa(mcp, pa)) {
1206 		/* pa is not on this board, just retry */
1207 		cmn_err(CE_WARN, "restart_patrol: invalid address %lx "
1208 		    "on board %d\n", pa, mcp->mc_board_num);
1209 		MAC_PTRL_START(mcp, bank);
1210 		return (0);
1211 	}
1212 
1213 	MC_LOG("restart_patrol: pa = %lx\n", pa);
1214 
1215 	if (!rsaddr_info->mi_injectrestart) {
1216 		/*
1217 		 * For non-error injection restart we need to
1218 		 * determine if the current restart pa/page is
1219 		 * a "good" page. A "good" page is a page that
1220 		 * has not been page retired. If the current
1221 		 * page that contains the pa is "good", we will
1222 		 * do a HW auto restart and let HW patrol continue
1223 		 * where it last stopped. Most desired scenario.
1224 		 *
1225 		 * If the current page is not "good", we will advance
1226 		 * to the next page to find the next "good" page and
1227 		 * restart the patrol from there.
1228 		 */
1229 		int wrapcount = 0;
1230 		uint64_t origpa = pa;
1231 		while (wrapcount < 2) {
1232 			if (!pa_is_valid(mcp, pa)) {
1233 			/*
1234 			 * Not in physinstall - advance to the
1235 			 * next memory isolation blocksize
1236 			 */
1237 			MC_LOG("Invalid PA\n");
1238 			pa = roundup(pa + 1, mc_isolation_bsize);
1239 			} else {
1240 			int rv;
1241 			if ((rv = page_retire_check(pa, NULL)) != 0 &&
1242 			    rv != EAGAIN) {
1243 					/*
1244 					 * The page is "good" (not retired),
1245 					 * we will use automatic HW restart
1246 					 * algorithm if this is the original
1247 					 * current starting page.
1248 					 */
1249 				if (pa == origpa) {
1250 					MC_LOG("Page has no error. "
1251 					    "Auto restart\n");
1252 					MAC_PTRL_START(mcp, bank);
1253 					return (0);
1254 				} else {
1255 					/*
1256 					 * found a subsequent good page
1257 					 */
1258 					break;
1259 				}
1260 			}
1261 
1262 			/*
1263 			 * Skip to the next page
1264 			 */
1265 			pa = roundup(pa + 1, PAGESIZE);
1266 			MC_LOG("Skipping bad page to %lx\n", pa);
1267 			}
1268 
1269 		    /* Check to see if we hit the end of the memory range */
1270 			if (pa >= (mcp->mc_start_address + mcp->mc_size)) {
1271 			MC_LOG("Wrap around\n");
1272 			pa = mcp->mc_start_address;
1273 			wrapcount++;
1274 			}
1275 		}
1276 
1277 		if (wrapcount > 1) {
1278 			MC_LOG("Failed to find a good page. Just restart\n");
1279 			MAC_PTRL_START(mcp, bank);
1280 			return (0);
1281 		}
1282 	}
1283 
1284 	/*
1285 	 * We reached here either:
1286 	 * 1. We are doing an error injection restart that specifies
1287 	 *    the exact pa/page to restart. OR
1288 	 * 2. We found a subsequent good page different from the
1289 	 *    original restart pa/page.
1290 	 * Restart MAC patrol: PA[37:6]
1291 	 */
1292 	MC_LOG("restart at pa = %lx\n", pa);
1293 	ST_MAC_REG(MAC_RESTART_ADD(mcp, bank), MAC_RESTART_PA(pa));
1294 	MAC_PTRL_START_ADD(mcp, bank);
1295 
1296 	return (0);
1297 }
1298 
1299 static void
1300 mc_retry_info_put(mc_retry_info_t **q, mc_retry_info_t *p)
1301 {
1302 	ASSERT(p != NULL);
1303 	p->ri_next = *q;
1304 	*q = p;
1305 }
1306 
1307 static mc_retry_info_t *
1308 mc_retry_info_get(mc_retry_info_t **q)
1309 {
1310 	mc_retry_info_t *p;
1311 
1312 	if ((p = *q) != NULL) {
1313 		*q = p->ri_next;
1314 		return (p);
1315 	} else {
1316 		return (NULL);
1317 	}
1318 }
1319 
1320 /*
1321  * Rewriting is used for two purposes.
1322  *  - to correct the error in memory.
1323  *  - to determine whether the error is permanent or intermittent.
1324  * It's done by writing the address in MAC_BANKm_REWRITE_ADD
1325  * and issuing the REW_REQ command in MAC_BANKm_PTRL_CNTL. After that,
1326  * REW_END (and REW_CE/REW_UE if some error detected) is set when
1327  * rewrite operation is done. See 4.7.3 and 4.7.11 in Columbus2 PRM.
1328  *
1329  * Note that rewrite operation doesn't change RAW_UE to Marked UE.
1330  * Therefore, we use it only in the CE case.
1331  */
1332 
1333 static uint32_t
1334 do_rewrite(mc_opl_t *mcp, int bank, uint32_t dimm_addr, int retrying)
1335 {
1336 	uint32_t cntl;
1337 	int count = 0;
1338 	int max_count;
1339 	int retry_state;
1340 
1341 	if (retrying)
1342 		max_count = 1;
1343 	else
1344 		max_count = mc_max_rewrite_loop;
1345 
1346 	retry_state = RETRY_STATE_PENDING;
1347 
1348 	if (!retrying && MC_REWRITE_MODE(mcp, bank)) {
1349 		goto timeout;
1350 	}
1351 
1352 	retry_state = RETRY_STATE_ACTIVE;
1353 
1354 	/* first wait to make sure PTRL_STATUS is 0 */
1355 	while (count++ < max_count) {
1356 		cntl = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank));
1357 		if (!(cntl & MAC_CNTL_PTRL_STATUS)) {
1358 			count = 0;
1359 			break;
1360 		}
1361 		drv_usecwait(mc_rewrite_delay);
1362 	}
1363 	if (count >= max_count)
1364 		goto timeout;
1365 
1366 	count = 0;
1367 
1368 	ST_MAC_REG(MAC_REWRITE_ADD(mcp, bank), dimm_addr);
1369 	MAC_REW_REQ(mcp, bank);
1370 
1371 	retry_state = RETRY_STATE_REWRITE;
1372 
1373 	do {
1374 		if (count++ > max_count) {
1375 			goto timeout;
1376 		} else {
1377 			drv_usecwait(mc_rewrite_delay);
1378 		}
1379 		cntl = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank));
1380 	/*
1381 	 * If there are other MEMORY or PCI activities, this
1382 	 * will be BUSY, else it should be set immediately
1383 	 */
1384 	} while (!(cntl & MAC_CNTL_REW_END));
1385 
1386 	MAC_CLEAR_ERRS(mcp, bank, MAC_CNTL_REW_ERRS);
1387 	return (cntl);
1388 timeout:
1389 	mc_set_rewrite(mcp, bank, dimm_addr, retry_state);
1390 
1391 	return (0);
1392 }
1393 
1394 void
1395 mc_clear_rewrite(mc_opl_t *mcp, int bank)
1396 {
1397 	struct mc_bank *bankp;
1398 	mc_retry_info_t *retry;
1399 	uint32_t rew_addr;
1400 
1401 	bankp = &(mcp->mc_bank[bank]);
1402 	retry = bankp->mcb_active;
1403 	bankp->mcb_active = NULL;
1404 	mc_retry_info_put(&bankp->mcb_retry_freelist, retry);
1405 
1406 again:
1407 	bankp->mcb_rewrite_count = 0;
1408 
1409 	while (retry = mc_retry_info_get(&bankp->mcb_retry_pending)) {
1410 		rew_addr = retry->ri_addr;
1411 		mc_retry_info_put(&bankp->mcb_retry_freelist, retry);
1412 		if (do_rewrite(mcp, bank, rew_addr, 1) == 0)
1413 			break;
1414 	}
1415 
1416 	/* we break out if no rewrites remain pending or we timed out again */
1417 
1418 	if (!bankp->mcb_active && !bankp->mcb_retry_pending) {
1419 		if (!IS_MIRROR(mcp, bank)) {
1420 			MC_CLEAR_REWRITE_MODE(mcp, bank);
1421 		} else {
1422 			int mbank = bank ^ 1;
1423 			bankp = &(mcp->mc_bank[mbank]);
1424 			if (!bankp->mcb_active && !bankp->mcb_retry_pending) {
1425 			MC_CLEAR_REWRITE_MODE(mcp, bank);
1426 			MC_CLEAR_REWRITE_MODE(mcp, mbank);
1427 			} else {
1428 			bank = mbank;
1429 			goto again;
1430 			}
1431 		}
1432 	}
1433 }
1434 
1435 void
1436 mc_set_rewrite(mc_opl_t *mcp, int bank, uint32_t addr, int state)
1437 {
1438 	mc_retry_info_t *retry;
1439 	struct mc_bank *bankp;
1440 
1441 	bankp = &mcp->mc_bank[bank];
1442 
1443 	retry = mc_retry_info_get(&bankp->mcb_retry_freelist);
1444 
1445 	if (retry == NULL) {
1446 		mc_addr_t maddr;
1447 		uint64_t paddr;
1448 		/*
1449 		 * previous rewrite request has not completed yet.
1450 		 * So we discard this rewrite request.
1451 		 */
1452 		maddr.ma_bd = mcp->mc_board_num;
1453 		maddr.ma_bank =  bank;
1454 		maddr.ma_dimm_addr = addr;
1455 		if (mcaddr_to_pa(mcp, &maddr, &paddr) == 0) {
1456 			cmn_err(CE_WARN, "Discard CE rewrite request"
1457 			    " for 0x%lx (/LSB%d/B%d/%x).\n",
1458 			    paddr, mcp->mc_board_num, bank, addr);
1459 		} else {
1460 			cmn_err(CE_WARN, "Discard CE rewrite request"
1461 			    " for /LSB%d/B%d/%x.\n",
1462 			    mcp->mc_board_num, bank, addr);
1463 		}
1464 		return;
1465 	}
1466 
1467 	retry->ri_addr = addr;
1468 	retry->ri_state = state;
1469 
1470 	MC_SET_REWRITE_MODE(mcp, bank);
1471 
1472 	if ((state > RETRY_STATE_PENDING)) {
1473 		ASSERT(bankp->mcb_active == NULL);
1474 		bankp->mcb_active = retry;
1475 	} else {
1476 		mc_retry_info_put(&bankp->mcb_retry_pending, retry);
1477 	}
1478 
1479 	if (IS_MIRROR(mcp, bank)) {
1480 		int mbank = bank ^1;
1481 		MC_SET_REWRITE_MODE(mcp, mbank);
1482 	}
1483 }
1484 
1485 void
1486 mc_process_scf_log(mc_opl_t *mcp)
1487 {
1488 	int count;
1489 	int n = 0;
1490 	scf_log_t *p;
1491 	int bank;
1492 
1493 	for (bank = 0; bank < BANKNUM_PER_SB; bank++) {
1494 		while ((p = mcp->mc_scf_log[bank]) != NULL &&
1495 		    (n < mc_max_errlog_processed)) {
1496 		ASSERT(bank == p->sl_bank);
1497 		count = 0;
1498 		while ((LD_MAC_REG(MAC_STATIC_ERR_ADD(mcp, p->sl_bank))
1499 		    & MAC_STATIC_ERR_VLD)) {
1500 			if (count++ >= (mc_max_scf_loop)) {
1501 				break;
1502 			}
1503 			drv_usecwait(mc_scf_delay);
1504 		}
1505 
1506 		if (count < mc_max_scf_loop) {
1507 			ST_MAC_REG(MAC_STATIC_ERR_LOG(mcp, p->sl_bank),
1508 			    p->sl_err_log);
1509 
1510 			ST_MAC_REG(MAC_STATIC_ERR_ADD(mcp, p->sl_bank),
1511 			    p->sl_err_add|MAC_STATIC_ERR_VLD);
1512 			mcp->mc_scf_retry[bank] = 0;
1513 		} else {
1514 			/*
1515 			 * if we try too many times, just drop the req
1516 			 */
1517 			if (mcp->mc_scf_retry[bank]++ <=
1518 			    mc_max_scf_retry) {
1519 				return;
1520 			} else {
1521 				if ((++mc_pce_dropped & 0xff) == 0) {
1522 					cmn_err(CE_WARN, "Cannot "
1523 					    "report CE to SCF\n");
1524 				}
1525 			}
1526 		}
1527 		n++;
1528 		mcp->mc_scf_log[bank] = p->sl_next;
1529 		mcp->mc_scf_total[bank]--;
1530 		ASSERT(mcp->mc_scf_total[bank] >= 0);
1531 		kmem_free(p, sizeof (scf_log_t));
1532 		}
1533 	}
1534 }
1535 void
1536 mc_queue_scf_log(mc_opl_t *mcp, mc_flt_stat_t *flt_stat, int bank)
1537 {
1538 	scf_log_t *p;
1539 
1540 	if (mcp->mc_scf_total[bank] >= mc_max_scf_logs) {
1541 		if ((++mc_pce_dropped & 0xff) == 0) {
1542 			cmn_err(CE_WARN, "Too many CE requests.\n");
1543 		}
1544 		return;
1545 	}
1546 	p = kmem_zalloc(sizeof (scf_log_t), KM_SLEEP);
1547 	p->sl_next = 0;
1548 	p->sl_err_add = flt_stat->mf_err_add;
1549 	p->sl_err_log = flt_stat->mf_err_log;
1550 	p->sl_bank = bank;
1551 
1552 	if (mcp->mc_scf_log[bank] == NULL) {
1553 		/*
1554 		 * we rely on mc_scf_log to detect NULL queue.
1555 		 * mc_scf_log_tail is irrelevant in that case.
1556 		 */
1557 		mcp->mc_scf_log_tail[bank] = mcp->mc_scf_log[bank] = p;
1558 	} else {
1559 		mcp->mc_scf_log_tail[bank]->sl_next = p;
1560 		mcp->mc_scf_log_tail[bank] = p;
1561 	}
1562 	mcp->mc_scf_total[bank]++;
1563 }
1564 /*
1565  * This routine determines what kind of CE happens, intermittent
1566  * or permanent as follows. (See 4.7.3 in Columbus2 PRM.)
1567  * - Do rewrite by issuing REW_REQ command to MAC_PTRL_CNTL register.
1568  * - If CE is still detected on the same address even after doing
1569  *   rewrite operation twice, it is determined as permanent error.
1570  * - If error is not detected anymore, it is determined as intermittent
1571  *   error.
1572  * - If UE is detected due to rewrite operation, it should be treated
1573  *   as UE.
1574  */
1575 
1576 /* ARGSUSED */
1577 static void
1578 mc_scrub_ce(mc_opl_t *mcp, int bank, mc_flt_stat_t *flt_stat, int ptrl_error)
1579 {
1580 	uint32_t cntl;
1581 	int i;
1582 
1583 	flt_stat->mf_type = FLT_TYPE_PERMANENT_CE;
1584 	/*
1585 	 * The 1st rewrite request reads and corrects the error data
1586 	 * and writes it back to the DIMM.  The 2nd rewrite request must be
1587 	 * issued after REW_CE/UE/END is 0.  When the 2nd request completes,
1588 	 * if REW_CE = 1, then it is a permanent CE.
1589 	 */
1590 	for (i = 0; i < 2; i++) {
1591 		cntl = do_rewrite(mcp, bank, flt_stat->mf_err_add, 0);
1592 
1593 		if (cntl == 0) {
1594 			/* timeout case */
1595 			return;
1596 		}
1597 		/*
1598 		 * If the error becomes UE or CMPE
1599 		 * we return to the caller immediately.
1600 		 */
1601 		if (cntl & MAC_CNTL_REW_UE) {
1602 			if (ptrl_error)
1603 				flt_stat->mf_cntl |= MAC_CNTL_PTRL_UE;
1604 			else
1605 				flt_stat->mf_cntl |= MAC_CNTL_MI_UE;
1606 			flt_stat->mf_type = FLT_TYPE_UE;
1607 			return;
1608 		}
1609 		if (cntl & MAC_CNTL_REW_CMPE) {
1610 			if (ptrl_error)
1611 				flt_stat->mf_cntl |= MAC_CNTL_PTRL_CMPE;
1612 			else
1613 				flt_stat->mf_cntl |= MAC_CNTL_MI_CMPE;
1614 			flt_stat->mf_type = FLT_TYPE_CMPE;
1615 			return;
1616 		}
1617 	}
1618 	if (!(cntl & MAC_CNTL_REW_CE)) {
1619 		flt_stat->mf_type = FLT_TYPE_INTERMITTENT_CE;
1620 	}
1621 
1622 	if (flt_stat->mf_type == FLT_TYPE_PERMANENT_CE) {
1623 		/* report PERMANENT_CE to SP via SCF */
1624 		if (!(flt_stat->mf_err_log & MAC_ERR_LOG_INVALID)) {
1625 			mc_queue_scf_log(mcp, flt_stat, bank);
1626 		}
1627 	}
1628 }
1629 
1630 #define	IS_CMPE(cntl, f)	((cntl) & ((f) ? MAC_CNTL_PTRL_CMPE :\
1631 				MAC_CNTL_MI_CMPE))
1632 #define	IS_UE(cntl, f)	((cntl) & ((f) ? MAC_CNTL_PTRL_UE : MAC_CNTL_MI_UE))
1633 #define	IS_CE(cntl, f)	((cntl) & ((f) ? MAC_CNTL_PTRL_CE : MAC_CNTL_MI_CE))
1634 #define	IS_OK(cntl, f)	(!((cntl) & ((f) ? MAC_CNTL_PTRL_ERRS : \
1635 			MAC_CNTL_MI_ERRS)))
1636 
1637 
1638 static int
1639 IS_CE_ONLY(uint32_t cntl, int ptrl_error)
1640 {
1641 	if (ptrl_error) {
1642 		return ((cntl & MAC_CNTL_PTRL_ERRS) == MAC_CNTL_PTRL_CE);
1643 	} else {
1644 		return ((cntl & MAC_CNTL_MI_ERRS) == MAC_CNTL_MI_CE);
1645 	}
1646 }
1647 
1648 void
1649 mc_write_cntl(mc_opl_t *mcp, int bank, uint32_t value)
1650 {
1651 	int ebank = (IS_MIRROR(mcp, bank)) ? MIRROR_IDX(bank) : bank;
1652 
1653 	if (mcp->mc_speedup_period[ebank] > 0)
1654 		value |= mc_max_speed;
1655 	else
1656 		value |= mcp->mc_speed;
1657 	ST_MAC_REG(MAC_PTRL_CNTL(mcp, bank), value);
1658 }
1659 
1660 static void
1661 mc_read_ptrl_reg(mc_opl_t *mcp, int bank, mc_flt_stat_t *flt_stat)
1662 {
1663 	flt_stat->mf_cntl = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank)) &
1664 	    MAC_CNTL_PTRL_ERRS;
1665 	flt_stat->mf_err_add = LD_MAC_REG(MAC_PTRL_ERR_ADD(mcp, bank));
1666 	flt_stat->mf_err_log = LD_MAC_REG(MAC_PTRL_ERR_LOG(mcp, bank));
1667 	flt_stat->mf_flt_maddr.ma_bd = mcp->mc_board_num;
1668 	flt_stat->mf_flt_maddr.ma_phys_bd = mcp->mc_phys_board_num;
1669 	flt_stat->mf_flt_maddr.ma_bank = bank;
1670 	flt_stat->mf_flt_maddr.ma_dimm_addr = flt_stat->mf_err_add;
1671 }
1672 
1673 static void
1674 mc_read_mi_reg(mc_opl_t *mcp, int bank, mc_flt_stat_t *flt_stat)
1675 {
1676 	uint32_t status, old_status;
1677 
1678 	status = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank)) & MAC_CNTL_MI_ERRS;
1679 	old_status = 0;
1680 
1681 	/* we keep reading until the status is stable */
1682 	while (old_status != status) {
1683 		old_status = status;
1684 		flt_stat->mf_err_add = LD_MAC_REG(MAC_MI_ERR_ADD(mcp, bank));
1685 		flt_stat->mf_err_log = LD_MAC_REG(MAC_MI_ERR_LOG(mcp, bank));
1686 		status = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank)) &
1687 		    MAC_CNTL_MI_ERRS;
1688 		if (status == old_status) {
1689 			break;
1690 		}
1691 	}
1692 
1693 	flt_stat->mf_cntl = status;
1694 	flt_stat->mf_flt_maddr.ma_bd = mcp->mc_board_num;
1695 	flt_stat->mf_flt_maddr.ma_phys_bd = mcp->mc_phys_board_num;
1696 	flt_stat->mf_flt_maddr.ma_bank = bank;
1697 	flt_stat->mf_flt_maddr.ma_dimm_addr = flt_stat->mf_err_add;
1698 }
1699 
1700 
1701 /*
1702  * Error philosophy for mirror mode:
1703  *
1704  * PTRL (The error address for both banks is the same, since ptrl stops when
1705  * it detects an error.)
1706  * - Compare error  log CMPE.
1707  *
1708  * - UE-UE           Report MUE.  No rewrite.
1709  *
1710  * - UE-*	     UE-(CE/OK). Rewrite to scrub UE.  Report SUE.
1711  *
1712  * - CE-*            CE-(CE/OK). Scrub to determine if CE is permanent.
1713  *                   If CE is permanent, inform SCF.  Once for each
1714  *		     Dimm.  If CE becomes UE or CMPE, go back to above.
1715  *
1716  *
1717  * MI (The error addresses for each bank are the same or different.)
1718  * - Compare  error  If addresses are the same.  Just CMPE, so log CMPE.
1719  *		     If addresses are different (this could happen
1720  *		     as a result of scrubbing), report each separately.
1721  *		     Only report error info on each side.
1722  *
1723  * - UE-UE           Addresses are the same.  Report MUE.
1724  *		     Addresses are different.  Report SUE on each bank.
1725  *		     Rewrite to clear UE.
1726  *
1727  * - UE-*	     UE-(CE/OK)
1728  *		     Rewrite to clear UE.  Report SUE for the bank.
1729  *
1730  * - CE-*            CE-(CE/OK).  Scrub to determine if CE is permanent.
1731  *                   If CE becomes UE or CMPE, go back to above.
1732  *
1733  */
1734 
1735 static int
1736 mc_process_error_mir(mc_opl_t *mcp, mc_aflt_t *mc_aflt, mc_flt_stat_t *flt_stat)
1737 {
1738 	int ptrl_error = mc_aflt->mflt_is_ptrl;
1739 	int i;
1740 	int rv = 0;
1741 	int bank;
1742 	int rewrite_timeout = 0;
1743 
1744 	MC_LOG("process mirror errors cntl[0] = %x, cntl[1] = %x\n",
1745 	    flt_stat[0].mf_cntl, flt_stat[1].mf_cntl);
1746 
1747 	if (ptrl_error) {
1748 		if (((flt_stat[0].mf_cntl | flt_stat[1].mf_cntl) &
1749 		    MAC_CNTL_PTRL_ERRS) == 0)
1750 			return (0);
1751 	} else {
1752 		if (((flt_stat[0].mf_cntl | flt_stat[1].mf_cntl) &
1753 		    MAC_CNTL_MI_ERRS) == 0)
1754 			return (0);
1755 	}
1756 
1757 	/*
1758 	 * First we take care of the case of CE
1759 	 * because they can become UE or CMPE
1760 	 */
1761 	for (i = 0; i < 2; i++) {
1762 		if (IS_CE_ONLY(flt_stat[i].mf_cntl, ptrl_error)) {
1763 			bank = flt_stat[i].mf_flt_maddr.ma_bank;
1764 			MC_LOG("CE detected on bank %d\n", bank);
1765 			mc_scrub_ce(mcp, bank, &flt_stat[i], ptrl_error);
1766 			if (MC_REWRITE_ACTIVE(mcp, bank)) {
1767 				rewrite_timeout = 1;
1768 			}
1769 			rv = 1;
1770 		}
1771 	}
1772 
1773 	if (rewrite_timeout)
1774 		return (0);
1775 
1776 	/* The above scrubbing can turn CE into UE or CMPE */
1777 
1778 	/*
1779 	 * Now we distinguish two cases: same address or not
1780 	 * the same address.  It might seem more intuitive to
1781 	 * distinguish PTRL vs. MI errors but it is more
1782 	 * complicated that way.
1783 	 */
1784 
1785 	if (flt_stat[0].mf_err_add == flt_stat[1].mf_err_add) {
1786 
1787 		if (IS_CMPE(flt_stat[0].mf_cntl, ptrl_error) ||
1788 		    IS_CMPE(flt_stat[1].mf_cntl, ptrl_error)) {
1789 			flt_stat[0].mf_type = FLT_TYPE_CMPE;
1790 			flt_stat[1].mf_type = FLT_TYPE_CMPE;
1791 			mc_aflt->mflt_erpt_class = MC_OPL_CMPE;
1792 			mc_aflt->mflt_nflts = 2;
1793 			mc_aflt->mflt_stat[0] = &flt_stat[0];
1794 			mc_aflt->mflt_stat[1] = &flt_stat[1];
1795 			mc_aflt->mflt_pr = PR_UE;
1796 			/*
1797 			 * A compare error results from a MAC internal error, so
1798 			 * simply log it instead of publishing an ereport. SCF
1799 			 * diagnoses all MAC internal and interface errors.
1800 			 */
1801 			MC_LOG("cmpe error detected\n");
1802 			return (1);
1803 		}
1804 
1805 		if (IS_UE(flt_stat[0].mf_cntl, ptrl_error) &&
1806 		    IS_UE(flt_stat[1].mf_cntl, ptrl_error)) {
1807 			/* Both side are UE's */
1808 
1809 			MAC_SET_ERRLOG_INFO(&flt_stat[0]);
1810 			MAC_SET_ERRLOG_INFO(&flt_stat[1]);
1811 			MC_LOG("MUE detected\n");
1812 			flt_stat[0].mf_type = FLT_TYPE_MUE;
1813 			flt_stat[1].mf_type = FLT_TYPE_MUE;
1814 			mc_aflt->mflt_erpt_class = MC_OPL_MUE;
1815 			mc_aflt->mflt_nflts = 2;
1816 			mc_aflt->mflt_stat[0] = &flt_stat[0];
1817 			mc_aflt->mflt_stat[1] = &flt_stat[1];
1818 			mc_aflt->mflt_pr = PR_UE;
1819 			mc_err_drain(mc_aflt);
1820 			return (1);
1821 		}
1822 
1823 		/* Now the only case is UE/CE, UE/OK, or don't care */
1824 		for (i = 0; i < 2; i++) {
1825 			if (IS_UE(flt_stat[i].mf_cntl, ptrl_error)) {
1826 
1827 			/* rewrite can clear the one side UE error */
1828 
1829 			if (IS_OK(flt_stat[i^1].mf_cntl, ptrl_error)) {
1830 				(void) do_rewrite(mcp,
1831 				    flt_stat[i].mf_flt_maddr.ma_bank,
1832 				    flt_stat[i].mf_flt_maddr.ma_dimm_addr, 0);
1833 			}
1834 			flt_stat[i].mf_type = FLT_TYPE_UE;
1835 			MAC_SET_ERRLOG_INFO(&flt_stat[i]);
1836 			mc_aflt->mflt_erpt_class = MC_OPL_SUE;
1837 			mc_aflt->mflt_stat[0] = &flt_stat[i];
1838 			mc_aflt->mflt_nflts = 1;
1839 			mc_aflt->mflt_pr = PR_MCE;
1840 			mc_err_drain(mc_aflt);
1841 			/* Once we hit a UE/CE or UE/OK case, done */
1842 			return (1);
1843 			}
1844 		}
1845 
1846 	} else {
1847 		/*
1848 		 * addresses are different. That means errors
1849 		 * on the 2 banks are not related at all.
1850 		 */
1851 		for (i = 0; i < 2; i++) {
1852 			if (IS_CMPE(flt_stat[i].mf_cntl, ptrl_error)) {
1853 				flt_stat[i].mf_type = FLT_TYPE_CMPE;
1854 				mc_aflt->mflt_erpt_class = MC_OPL_CMPE;
1855 				mc_aflt->mflt_nflts = 1;
1856 				mc_aflt->mflt_stat[0] = &flt_stat[i];
1857 				mc_aflt->mflt_pr = PR_UE;
1858 				/*
1859 				 * A compare error results from a MAC internal
1860 				 * error, so simply log it instead of
1861 				 * publishing an ereport. SCF diagnoses all
1862 				 * MAC internal and interface errors.
1863 				 */
1864 				MC_LOG("cmpe error detected\n");
1865 				/* no more report on this bank */
1866 				flt_stat[i].mf_cntl = 0;
1867 				rv = 1;
1868 			}
1869 		}
1870 
1871 		/* rewrite can clear the one side UE error */
1872 
1873 		for (i = 0; i < 2; i++) {
1874 			if (IS_UE(flt_stat[i].mf_cntl, ptrl_error)) {
1875 				(void) do_rewrite(mcp,
1876 				    flt_stat[i].mf_flt_maddr.ma_bank,
1877 				    flt_stat[i].mf_flt_maddr.ma_dimm_addr,
1878 				    0);
1879 				flt_stat[i].mf_type = FLT_TYPE_UE;
1880 				MAC_SET_ERRLOG_INFO(&flt_stat[i]);
1881 				mc_aflt->mflt_erpt_class = MC_OPL_SUE;
1882 				mc_aflt->mflt_stat[0] = &flt_stat[i];
1883 				mc_aflt->mflt_nflts = 1;
1884 				mc_aflt->mflt_pr = PR_MCE;
1885 				mc_err_drain(mc_aflt);
1886 				rv = 1;
1887 			}
1888 		}
1889 	}
1890 	return (rv);
1891 }
1892 static void
1893 mc_error_handler_mir(mc_opl_t *mcp, int bank, mc_rsaddr_info_t *rsaddr)
1894 {
1895 	mc_aflt_t mc_aflt;
1896 	mc_flt_stat_t flt_stat[2], mi_flt_stat[2];
1897 	int i;
1898 	int mi_valid;
1899 
1900 	ASSERT(rsaddr);
1901 
1902 	bzero(&mc_aflt, sizeof (mc_aflt_t));
1903 	bzero(&flt_stat, 2 * sizeof (mc_flt_stat_t));
1904 	bzero(&mi_flt_stat, 2 * sizeof (mc_flt_stat_t));
1905 
1906 
1907 	mc_aflt.mflt_mcp = mcp;
1908 	mc_aflt.mflt_id = gethrtime();
1909 
1910 	/* Now read all the registers into flt_stat */
1911 
1912 	for (i = 0; i < 2; i++) {
1913 		MC_LOG("Reading registers of bank %d\n", bank);
1914 		/* patrol registers */
1915 		mc_read_ptrl_reg(mcp, bank, &flt_stat[i]);
1916 
1917 		/*
1918 		 * In mirror mode, it is possible that only one bank
1919 		 * may report the error. We need to check for it to
1920 		 * ensure we pick the right addr value for patrol restart.
1921 		 * Note that if both banks reported errors, we pick the
1922 		 * 2nd one.  Both banks should report the same error address.
1923 		 */
1924 		if (flt_stat[i].mf_cntl & MAC_CNTL_PTRL_ERRS)
1925 			rsaddr->mi_restartaddr = flt_stat[i].mf_flt_maddr;
1926 
1927 		MC_LOG("ptrl registers cntl %x add %x log %x\n",
1928 		    flt_stat[i].mf_cntl, flt_stat[i].mf_err_add,
1929 		    flt_stat[i].mf_err_log);
1930 
1931 		/* MI registers */
1932 		mc_read_mi_reg(mcp, bank, &mi_flt_stat[i]);
1933 
1934 		MC_LOG("MI registers cntl %x add %x log %x\n",
1935 		    mi_flt_stat[i].mf_cntl, mi_flt_stat[i].mf_err_add,
1936 		    mi_flt_stat[i].mf_err_log);
1937 
1938 		bank = bank^1;
1939 	}
1940 
1941 	/* clear errors once we read all the registers */
1942 	MAC_CLEAR_ERRS(mcp, bank, (MAC_CNTL_PTRL_ERRS|MAC_CNTL_MI_ERRS));
1943 
1944 	MAC_CLEAR_ERRS(mcp, bank ^ 1, (MAC_CNTL_PTRL_ERRS|MAC_CNTL_MI_ERRS));
1945 
1946 	/* Process MI errors first */
1947 
1948 	/* if not in error mode, treat cntl as 0 */
1949 	if ((mi_flt_stat[0].mf_err_add & MAC_ERR_ADD_INVALID) ||
1950 	    (mi_flt_stat[0].mf_err_log & MAC_ERR_LOG_INVALID))
1951 		mi_flt_stat[0].mf_cntl = 0;
1952 
1953 	if ((mi_flt_stat[1].mf_err_add & MAC_ERR_ADD_INVALID) ||
1954 	    (mi_flt_stat[1].mf_err_log & MAC_ERR_LOG_INVALID))
1955 		mi_flt_stat[1].mf_cntl = 0;
1956 
1957 	mc_aflt.mflt_is_ptrl = 0;
1958 	mi_valid = mc_process_error_mir(mcp, &mc_aflt, &mi_flt_stat[0]);
1959 
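	/*
	 * The PTRL error is considered a duplicate of the MI error when,
	 * on both banks, the error-type bits match and the PTRL error
	 * address equals the MI error address rounded down to
	 * MC_BOUND_BYTE.  In that case only the MI error is reported.
	 */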
1960 	if ((((flt_stat[0].mf_cntl & MAC_CNTL_PTRL_ERRS) >>
1961 	    MAC_CNTL_PTRL_ERR_SHIFT) == ((mi_flt_stat[0].mf_cntl &
1962 	    MAC_CNTL_MI_ERRS) >> MAC_CNTL_MI_ERR_SHIFT)) &&
1963 	    (flt_stat[0].mf_err_add ==
1964 	    ROUNDDOWN(mi_flt_stat[0].mf_err_add, MC_BOUND_BYTE)) &&
1965 	    (((flt_stat[1].mf_cntl & MAC_CNTL_PTRL_ERRS) >>
1966 	    MAC_CNTL_PTRL_ERR_SHIFT) == ((mi_flt_stat[1].mf_cntl &
1967 	    MAC_CNTL_MI_ERRS) >> MAC_CNTL_MI_ERR_SHIFT)) &&
1968 	    (flt_stat[1].mf_err_add ==
1969 	    ROUNDDOWN(mi_flt_stat[1].mf_err_add, MC_BOUND_BYTE))) {
1970 #ifdef DEBUG
1971 		MC_LOG("discarding PTRL error because "
1972 		    "it is the same as MI\n");
1973 #endif
1974 		rsaddr->mi_valid = mi_valid;
1975 		return;
1976 	}
1977 	/* if not in error mode, treat cntl as 0 */
1978 	if ((flt_stat[0].mf_err_add & MAC_ERR_ADD_INVALID) ||
1979 	    (flt_stat[0].mf_err_log & MAC_ERR_LOG_INVALID))
1980 		flt_stat[0].mf_cntl = 0;
1981 
1982 	if ((flt_stat[1].mf_err_add & MAC_ERR_ADD_INVALID) ||
1983 	    (flt_stat[1].mf_err_log & MAC_ERR_LOG_INVALID))
1984 		flt_stat[1].mf_cntl = 0;
1985 
1986 	mc_aflt.mflt_is_ptrl = 1;
1987 	rsaddr->mi_valid = mc_process_error_mir(mcp, &mc_aflt, &flt_stat[0]);
1988 }
1989 static int
1990 mc_process_error(mc_opl_t *mcp, int bank, mc_aflt_t *mc_aflt,
1991 	mc_flt_stat_t *flt_stat)
1992 {
1993 	int ptrl_error = mc_aflt->mflt_is_ptrl;
1994 	int rv = 0;
1995 
1996 	mc_aflt->mflt_erpt_class = NULL;
1997 	if (IS_UE(flt_stat->mf_cntl, ptrl_error)) {
1998 		MC_LOG("UE detected\n");
1999 		flt_stat->mf_type = FLT_TYPE_UE;
2000 		mc_aflt->mflt_erpt_class = MC_OPL_UE;
2001 		mc_aflt->mflt_pr = PR_UE;
2002 		MAC_SET_ERRLOG_INFO(flt_stat);
2003 		rv = 1;
2004 	} else if (IS_CE(flt_stat->mf_cntl, ptrl_error)) {
2005 		MC_LOG("CE detected\n");
2006 		MAC_SET_ERRLOG_INFO(flt_stat);
2007 
2008 		/* Error type can change after scrubbing */
2009 		mc_scrub_ce(mcp, bank, flt_stat, ptrl_error);
2010 		if (MC_REWRITE_ACTIVE(mcp, bank)) {
2011 			return (0);
2012 		}
2013 
2014 		if (flt_stat->mf_type == FLT_TYPE_INTERMITTENT_CE) {
2015 			mc_aflt->mflt_erpt_class = MC_OPL_ICE;
2016 			mc_aflt->mflt_pr = PR_MCE;
2017 		} else if (flt_stat->mf_type == FLT_TYPE_PERMANENT_CE) {
2018 			mc_aflt->mflt_erpt_class = MC_OPL_CE;
2019 			mc_aflt->mflt_pr = PR_MCE;
2020 		} else if (flt_stat->mf_type == FLT_TYPE_UE) {
2021 			mc_aflt->mflt_erpt_class = MC_OPL_UE;
2022 			mc_aflt->mflt_pr = PR_UE;
2023 		}
2024 		rv = 1;
2025 	}
2026 	MC_LOG("mc_process_error: fault type %x erpt %s\n", flt_stat->mf_type,
2027 	    mc_aflt->mflt_erpt_class);
2028 	if (mc_aflt->mflt_erpt_class) {
2029 		mc_aflt->mflt_stat[0] = flt_stat;
2030 		mc_aflt->mflt_nflts = 1;
2031 		mc_err_drain(mc_aflt);
2032 	}
2033 	return (rv);
2034 }
2035 
2036 static void
2037 mc_error_handler(mc_opl_t *mcp, int bank, mc_rsaddr_info_t *rsaddr)
2038 {
2039 	mc_aflt_t mc_aflt;
2040 	mc_flt_stat_t flt_stat, mi_flt_stat;
2041 	int mi_valid;
2042 
2043 	bzero(&mc_aflt, sizeof (mc_aflt_t));
2044 	bzero(&flt_stat, sizeof (mc_flt_stat_t));
2045 	bzero(&mi_flt_stat, sizeof (mc_flt_stat_t));
2046 
2047 	mc_aflt.mflt_mcp = mcp;
2048 	mc_aflt.mflt_id = gethrtime();
2049 
2050 	/* patrol registers */
2051 	mc_read_ptrl_reg(mcp, bank, &flt_stat);
2052 
2053 	ASSERT(rsaddr);
2054 	rsaddr->mi_restartaddr = flt_stat.mf_flt_maddr;
2055 
2056 	MC_LOG("ptrl registers cntl %x add %x log %x\n", flt_stat.mf_cntl,
2057 	    flt_stat.mf_err_add, flt_stat.mf_err_log);
2058 
2059 	/* MI registers */
2060 	mc_read_mi_reg(mcp, bank, &mi_flt_stat);
2061 
2062 
2063 	MC_LOG("MI registers cntl %x add %x log %x\n", mi_flt_stat.mf_cntl,
2064 	    mi_flt_stat.mf_err_add, mi_flt_stat.mf_err_log);
2065 
2066 	/* clear errors once we read all the registers */
2067 	MAC_CLEAR_ERRS(mcp, bank, (MAC_CNTL_PTRL_ERRS|MAC_CNTL_MI_ERRS));
2068 
2069 	mc_aflt.mflt_is_ptrl = 0;
2070 	if ((mi_flt_stat.mf_cntl & MAC_CNTL_MI_ERRS) &&
2071 	    ((mi_flt_stat.mf_err_add & MAC_ERR_ADD_INVALID) == 0) &&
2072 	    ((mi_flt_stat.mf_err_log & MAC_ERR_LOG_INVALID) == 0)) {
2073 		mi_valid = mc_process_error(mcp, bank, &mc_aflt, &mi_flt_stat);
2074 	}
2075 
2076 	if ((((flt_stat.mf_cntl & MAC_CNTL_PTRL_ERRS) >>
2077 	    MAC_CNTL_PTRL_ERR_SHIFT) == ((mi_flt_stat.mf_cntl &
2078 	    MAC_CNTL_MI_ERRS) >> MAC_CNTL_MI_ERR_SHIFT)) &&
2079 	    (flt_stat.mf_err_add ==
2080 	    ROUNDDOWN(mi_flt_stat.mf_err_add, MC_BOUND_BYTE))) {
2081 #ifdef DEBUG
2082 		MC_LOG("discarding PTRL error because "
2083 		    "it is the same as MI\n");
2084 #endif
2085 		rsaddr->mi_valid = mi_valid;
2086 		return;
2087 	}
2088 
2089 	mc_aflt.mflt_is_ptrl = 1;
2090 	if ((flt_stat.mf_cntl & MAC_CNTL_PTRL_ERRS) &&
2091 	    ((flt_stat.mf_err_add & MAC_ERR_ADD_INVALID) == 0) &&
2092 	    ((flt_stat.mf_err_log & MAC_ERR_LOG_INVALID) == 0)) {
2093 		rsaddr->mi_valid = mc_process_error(mcp, bank, &mc_aflt,
2094 		    &flt_stat);
2095 	}
2096 }
2097 /*
2098  *	memory patrol error handling algorithm:
2099  *	timeout() is used to do periodic polling
2100  *	This is the flow chart.
2101  *	timeout ->
2102  *	mc_check_errors()
2103  *	    if memory bank is installed, read the status register
2104  *	    if any error bit is set,
2105  *	    -> mc_error_handler()
2106  *		-> read all error registers
2107  *	        -> mc_process_error()
2108  *	            determine error type
2109  *	            rewrite to clear error or scrub to determine CE type
2110  *	            inform SCF on permanent CE
2111  *	        -> mc_err_drain
2112  *	            page offline processing
2113  *	            -> mc_ereport_post()
2114  */
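/*
 * In mirror mode the same flow applies, with mc_error_handler_mir() and
 * mc_process_error_mir() handling the paired banks: the patrol and MI
 * registers of both banks are read and cleared together, and
 * restart_patrol() is given the address reported by the bank that
 * actually flagged the error.
 */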
2115 
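/*
 * mc_process_rewrite() drives a software-requested rewrite on a bank:
 * while the retry is still in RETRY_STATE_ACTIVE (or earlier) and
 * MAC_CNTL_PTRL_STATUS is clear, it programs MAC_REWRITE_ADD with the
 * retry address and issues the rewrite request, moving the retry to
 * RETRY_STATE_REWRITE.  Once MAC_CNTL_REW_END is observed, the rewrite
 * error bits are cleared and mc_clear_rewrite() retires the retry;
 * otherwise the rewrite counter below may eventually warn about an
 * overly long rewrite.
 */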
2116 static void
2117 mc_process_rewrite(mc_opl_t *mcp, int bank)
2118 {
2119 	uint32_t rew_addr, cntl;
2120 	mc_retry_info_t *retry;
2121 	struct mc_bank *bankp;
2122 
2123 	bankp = &(mcp->mc_bank[bank]);
2124 	retry = bankp->mcb_active;
2125 	if (retry == NULL)
2126 		return;
2127 
2128 	if (retry->ri_state <= RETRY_STATE_ACTIVE) {
2129 		cntl = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank));
2130 		if (cntl & MAC_CNTL_PTRL_STATUS)
2131 			return;
2132 		rew_addr = retry->ri_addr;
2133 		ST_MAC_REG(MAC_REWRITE_ADD(mcp, bank), rew_addr);
2134 		MAC_REW_REQ(mcp, bank);
2135 
2136 		retry->ri_state = RETRY_STATE_REWRITE;
2137 	}
2138 
2139 	cntl = ldphysio(MAC_PTRL_CNTL(mcp, bank));
2140 
2141 	if (cntl & MAC_CNTL_REW_END) {
2142 		MAC_CLEAR_ERRS(mcp, bank,
2143 		    MAC_CNTL_REW_ERRS);
2144 		mc_clear_rewrite(mcp, bank);
2145 	} else {
2146 		/*
2147 		 * If the rewrite does not complete in
2148 		 * 1 hour, we have to consider this a HW
2149 		 * failure.  However, there is no recovery
2150 			 * is to print a warning message to the
2151 		 * to to print a warning message to the
2152 		 * console.  We continue to increment the
2153 		 * counter but we only print the message
2154 			 * time to wrap around before the user might
2155 		 * time to wrap around and the user might
2156 		 * see a second message.  In practice,
2157 		 * we have never hit this condition but
2158 		 * we have to keep the code here just in case.
2159 		 */
2160 		if (++mcp->mc_bank[bank].mcb_rewrite_count
2161 		    == mc_max_rewrite_retry) {
2162 			cmn_err(CE_WARN, "Memory patrol feature is"
2163 			" partly suspended on /LSB%d/B%d"
2164 			" due to heavy memory load,"
2165 			" and it will restart"
2166 			" automatically.\n", mcp->mc_board_num,
2167 			    bank);
2168 		}
2169 	}
2170 }
2171 
2172 static void
2173 mc_check_errors_func(mc_opl_t *mcp)
2174 {
2175 	mc_rsaddr_info_t rsaddr_info;
2176 	int i, error_count = 0;
2177 	uint32_t stat, cntl;
2178 	int running;
2179 	int wrapped;
2180 	int ebk;
2181 
2182 	/*
2183 	 * scan errors.
2184 	 */
2185 	if (mcp->mc_status & MC_MEMORYLESS)
2186 		return;
2187 
2188 	for (i = 0; i < BANKNUM_PER_SB; i++) {
2189 		if (mcp->mc_bank[i].mcb_status & BANK_INSTALLED) {
2190 			if (MC_REWRITE_ACTIVE(mcp, i)) {
2191 				mc_process_rewrite(mcp, i);
2192 			}
2193 			stat = ldphysio(MAC_PTRL_STAT(mcp, i));
2194 			cntl = ldphysio(MAC_PTRL_CNTL(mcp, i));
2195 			running = cntl & MAC_CNTL_PTRL_START;
2196 			wrapped = cntl & MAC_CNTL_PTRL_ADD_MAX;
2197 
2198 			/* Compute the effective bank idx */
2199 			ebk = (IS_MIRROR(mcp, i)) ? MIRROR_IDX(i) : i;
2200 
2201 			if (mc_debug_show_all || stat) {
2202 				MC_LOG("/LSB%d/B%d stat %x cntl %x\n",
2203 				    mcp->mc_board_num, i, stat, cntl);
2204 			}
2205 
2206 			/*
2207 			 * Update stats and reset flag if the HW patrol
2208 			 * wrapped around in its scan.
2209 			 */
2210 			if (wrapped) {
2211 				MAC_CLEAR_MAX(mcp, i);
2212 				mcp->mc_period[ebk]++;
2213 				if (IS_MIRROR(mcp, i)) {
2214 					MC_LOG("mirror mc period %ld on "
2215 					    "/LSB%d/B%d\n", mcp->mc_period[ebk],
2216 					    mcp->mc_board_num, i);
2217 				} else {
2218 					MC_LOG("mc period %ld on "
2219 					    "/LSB%d/B%d\n", mcp->mc_period[ebk],
2220 					    mcp->mc_board_num, i);
2221 				}
2222 			}
2223 
2224 			if (running) {
2225 				/*
2226 				 * Mac patrol HW is still running.
2227 				 * Normally when an error is detected,
2228 				 * the HW patrol will stop so that we
2229 				 * can collect error data for reporting.
2230 				 * Certain errors (MI errors) detected may not
2231 				 * cause the HW patrol to stop, which is a
2232 				 * problem since we cannot read error data while
2233 				 * the HW patrol is running. SW is not allowed
2234 				 * to stop the HW patrol while it is running
2235 				 * as it may cause HW inconsistency. This is
2236 				 * described in a HW errata.
2237 				 * In situations where we detect errors
2238 				 * that may not cause the HW patrol to stop,
2239 				 * we speed up the HW patrol scanning in
2240 				 * the hope that it will find the 'real' PTRL
2241 				 * errors associated with the previous errors
2242 				 * causing the HW to finally stop so that we
2243 				 * can do the reporting.
2244 				 */
2245 				/*
2246 				 * Check to see if we did speed up
2247 				 * the HW patrol due to previous errors
2248 				 * detected that did not cause the patrol
2249 				 * to stop. We only do it if HW patrol scan
2250 				 * wrapped (counted as completing a 'period').
2251 				 */
2252 				if (mcp->mc_speedup_period[ebk] > 0) {
2253 					if (wrapped &&
2254 					    (--mcp->mc_speedup_period[ebk] ==
2255 					    0)) {
2256 						/*
2257 						 * We did try to speed up.
2258 						 * The speed up period has
2259 						 * expired and the HW patrol
2260 						 * is still running.  The
2261 						 * errors must be intermittent.
2262 						 * We have no choice but to
2263 						 * ignore them, reset the scan
2264 						 * speed to normal and clear
2265 						 * the MI error bits. For
2266 						 * mirror mode, we need to
2267 						 * clear errors on both banks.
2268 						 */
2269 						MC_LOG("Clearing MI errors\n");
2270 						MAC_CLEAR_ERRS(mcp, i,
2271 						    MAC_CNTL_MI_ERRS);
2272 
2273 						if (IS_MIRROR(mcp, i)) {
2274 							MC_LOG("Clearing "
2275 							    "Mirror MI errs\n");
2276 							MAC_CLEAR_ERRS(mcp,
2277 							    i^1,
2278 							    MAC_CNTL_MI_ERRS);
2279 						}
2280 					}
2281 				} else if (stat & MAC_STAT_MI_ERRS) {
2282 					/*
2283 					 * MI errors detected but we cannot
2284 					 * report them since the HW patrol
2285 					 * is still running.
2286 					 * We will attempt to speed up the
2287 					 * scanning and hopefully the HW
2288 					 * can detect PTRL errors at the same
2289 					 * location, which will cause the HW
2290 					 * patrol to stop.
2291 					 */
2292 					mcp->mc_speedup_period[ebk] = 2;
2293 					MAC_CMD(mcp, i, 0);
2294 				}
2295 			} else if (stat & (MAC_STAT_PTRL_ERRS |
2296 			    MAC_STAT_MI_ERRS)) {
2297 				/*
2298 				 * HW Patrol has stopped and we found errors.
2299 				 * Proceed to collect and report error info.
2300 				 */
2301 				mcp->mc_speedup_period[ebk] = 0;
2302 				rsaddr_info.mi_valid = 0;
2303 				rsaddr_info.mi_injectrestart = 0;
2304 				if (IS_MIRROR(mcp, i)) {
2305 					mc_error_handler_mir(mcp, i,
2306 					    &rsaddr_info);
2307 				} else {
2308 					mc_error_handler(mcp, i, &rsaddr_info);
2309 				}
2310 
2311 				error_count++;
2312 				(void) restart_patrol(mcp, i, &rsaddr_info);
2313 			} else {
2314 				/*
2315 				 * HW patrol scan has apparently stopped
2316 				 * but no errors detected/flagged.
2317 				 * Restart the HW patrol just to be sure.
2318 				 * In mirror mode, the odd bank might have
2319 				 * reported errors that caused the patrol to
2320 				 * stop. We'll defer the restart to the odd
2321 				 * bank in this case.
2322 				 */
2323 				if (!IS_MIRROR(mcp, i) || (i & 0x1))
2324 					(void) restart_patrol(mcp, i, NULL);
2325 			}
2326 		}
2327 	}
2328 	if (error_count > 0)
2329 		mcp->mc_last_error += error_count;
2330 	else
2331 		mcp->mc_last_error = 0;
2332 }
2333 
2334 /*
2335  * mc_polling -- Check errors for only one instance,
2336  * but process errors for all instances to make sure we drain the errors
2337  * faster than they can be accumulated.
2338  *
2339  * Polling on each board should be done only once per
2340  * mc_patrol_interval_sec.  This is equivalent to setting mc_tick_left
2341  * to OPL_MAX_BOARDS and decrementing it by 1 on each timeout.
2342  * Once mc_tick_left becomes negative, the board becomes a candidate
2343  * for polling because it has waited at least
2344  * mc_patrol_interval_sec.  If mc_timeout_period is calculated
2345  * differently, this has to be updated accordingly.
2346  */
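/*
 * For illustration: on each timeout at most one board with
 * mc_tick_left <= 0 has its banks scanned by mc_check_errors_func(),
 * and its mc_tick_left is reset to OPL_MAX_BOARDS; the remaining boards
 * only have mc_tick_left decremented.  mc_process_scf_log() drains
 * pending SCF logs for every installed board on every pass.
 */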
2347 
2348 static void
2349 mc_polling(void)
2350 {
2351 	int i, scan_error;
2352 	mc_opl_t *mcp;
2353 
2354 
2355 	scan_error = 1;
2356 	for (i = 0; i < OPL_MAX_BOARDS; i++) {
2357 		mutex_enter(&mcmutex);
2358 		if ((mcp = mc_instances[i]) == NULL) {
2359 			mutex_exit(&mcmutex);
2360 			continue;
2361 		}
2362 		mutex_enter(&mcp->mc_lock);
2363 		mutex_exit(&mcmutex);
2364 		if (!(mcp->mc_status & MC_POLL_RUNNING)) {
2365 			mutex_exit(&mcp->mc_lock);
2366 			continue;
2367 		}
2368 		if (scan_error && mcp->mc_tick_left <= 0) {
2369 			mc_check_errors_func((void *)mcp);
2370 			mcp->mc_tick_left = OPL_MAX_BOARDS;
2371 			scan_error = 0;
2372 		} else {
2373 			mcp->mc_tick_left--;
2374 		}
2375 		mc_process_scf_log(mcp);
2376 		mutex_exit(&mcp->mc_lock);
2377 	}
2378 }
2379 
2380 static void
2381 get_ptrl_start_address(mc_opl_t *mcp, int bank, mc_addr_t *maddr)
2382 {
2383 	maddr->ma_bd = mcp->mc_board_num;
2384 	maddr->ma_bank = bank;
2385 	maddr->ma_dimm_addr = 0;
2386 }
2387 
2388 typedef struct mc_mem_range {
2389 	uint64_t	addr;
2390 	uint64_t	size;
2391 } mc_mem_range_t;
2392 
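/*
 * The "sb-mem-ranges" property is assumed to be an array of
 * { addr, size } pairs (mc_mem_range_t); get_base_address() uses only
 * the first entry to establish this board's base address and size.
 */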
2393 static int
2394 get_base_address(mc_opl_t *mcp)
2395 {
2396 	mc_mem_range_t *mem_range;
2397 	int len;
2398 
2399 	if (ddi_getlongprop(DDI_DEV_T_ANY, mcp->mc_dip, DDI_PROP_DONTPASS,
2400 	    "sb-mem-ranges", (caddr_t)&mem_range, &len) != DDI_SUCCESS) {
2401 		return (DDI_FAILURE);
2402 	}
2403 
2404 	mcp->mc_start_address = mem_range->addr;
2405 	mcp->mc_size = mem_range->size;
2406 
2407 	kmem_free(mem_range, len);
2408 	return (DDI_SUCCESS);
2409 }
2410 
2411 struct mc_addr_spec {
2412 	uint32_t bank;
2413 	uint32_t phys_hi;
2414 	uint32_t phys_lo;
2415 };
2416 
2417 #define	REGS_PA(m, i) ((((uint64_t)m[i].phys_hi)<<32) | m[i].phys_lo)
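/*
 * e.g. with phys_hi = 0x4 and phys_lo = 0x80000000, REGS_PA() yields the
 * 64-bit register base 0x480000000.
 */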
2418 
2419 static char *mc_tbl_name[] = {
2420 	"cs0-mc-pa-trans-table",
2421 	"cs1-mc-pa-trans-table"
2422 };
2423 
2424 /*
2425  * This routine performs a rangecheck for a given PA
2426  * to see if it belongs to the memory range for this board.
2427  * Return 1 if it is valid (within the range) and 0 otherwise
2428  */
2429 static int
2430 mc_rangecheck_pa(mc_opl_t *mcp, uint64_t pa)
2431 {
2432 	if ((pa < mcp->mc_start_address) || (mcp->mc_start_address +
2433 	    mcp->mc_size <= pa))
2434 		return (0);
2435 	else
2436 		return (1);
2437 }
2438 
2439 static void
2440 mc_memlist_delete(struct memlist *mlist)
2441 {
2442 	struct memlist *ml;
2443 
2444 	for (ml = mlist; ml; ml = mlist) {
2445 		mlist = ml->ml_next;
2446 		kmem_free(ml, sizeof (struct memlist));
2447 	}
2448 }
2449 
2450 static struct memlist *
2451 mc_memlist_dup(struct memlist *mlist)
2452 {
2453 	struct memlist *hl = NULL, *tl, **mlp;
2454 
2455 	if (mlist == NULL)
2456 		return (NULL);
2457 
2458 	mlp = &hl;
2459 	tl = *mlp;
2460 	for (; mlist; mlist = mlist->ml_next) {
2461 		*mlp = kmem_alloc(sizeof (struct memlist), KM_SLEEP);
2462 		(*mlp)->ml_address = mlist->ml_address;
2463 		(*mlp)->ml_size = mlist->ml_size;
2464 		(*mlp)->ml_prev = tl;
2465 		tl = *mlp;
2466 		mlp = &((*mlp)->ml_next);
2467 	}
2468 	*mlp = NULL;
2469 
2470 	return (hl);
2471 }
2472 
2473 
2474 static struct memlist *
2475 mc_memlist_del_span(struct memlist *mlist, uint64_t base, uint64_t len)
2476 {
2477 	uint64_t	end;
2478 	struct memlist	*ml, *tl, *nlp;
2479 
2480 	if (mlist == NULL)
2481 		return (NULL);
2482 
2483 	end = base + len;
2484 	if ((end <= mlist->ml_address) || (base == end))
2485 		return (mlist);
2486 
2487 	for (tl = ml = mlist; ml; tl = ml, ml = nlp) {
2488 		uint64_t	mend;
2489 
2490 		nlp = ml->ml_next;
2491 
2492 		if (end <= ml->ml_address)
2493 			break;
2494 
2495 		mend = ml->ml_address + ml->ml_size;
2496 		if (base < mend) {
2497 			if (base <= ml->ml_address) {
2498 				ml->ml_address = end;
2499 				if (end >= mend)
2500 					ml->ml_size = 0ull;
2501 				else
2502 					ml->ml_size = mend - ml->ml_address;
2503 			} else {
2504 				ml->ml_size = base - ml->ml_address;
2505 				if (end < mend) {
2506 					struct memlist	*nl;
2507 					/*
2508 					 * splitting a memlist entry.
2509 					 */
2510 					nl = kmem_alloc(sizeof (struct memlist),
2511 					    KM_SLEEP);
2512 					nl->ml_address = end;
2513 					nl->ml_size = mend - nl->ml_address;
2514 					if ((nl->ml_next = nlp) != NULL)
2515 						nlp->ml_prev = nl;
2516 					nl->ml_prev = ml;
2517 					ml->ml_next = nl;
2518 					nlp = nl;
2519 				}
2520 			}
2521 			if (ml->ml_size == 0ull) {
2522 				if (ml == mlist) {
2523 					if ((mlist = nlp) != NULL)
2524 						nlp->ml_prev = NULL;
2525 					kmem_free(ml, sizeof (struct memlist));
2526 					if (mlist == NULL)
2527 						break;
2528 					ml = nlp;
2529 				} else {
2530 					if ((tl->ml_next = nlp) != NULL)
2531 						nlp->ml_prev = tl;
2532 					kmem_free(ml, sizeof (struct memlist));
2533 					ml = tl;
2534 				}
2535 			}
2536 		}
2537 	}
2538 
2539 	return (mlist);
2540 }
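/*
 * Example: deleting the span [0x2000, 0x3000) from an entry covering
 * [0x1000, 0x5000) leaves the original entry as [0x1000, 0x2000) and a
 * newly allocated entry covering [0x3000, 0x5000) -- the "splitting a
 * memlist entry" case above.
 */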
2541 
2542 static void
2543 mc_get_mlist(mc_opl_t *mcp)
2544 {
2545 	struct memlist *mlist;
2546 
2547 	memlist_read_lock();
2548 	mlist = mc_memlist_dup(phys_install);
2549 	memlist_read_unlock();
2550 
2551 	if (mlist) {
2552 		mlist = mc_memlist_del_span(mlist, 0ull, mcp->mc_start_address);
2553 	}
2554 
2555 	if (mlist) {
2556 		uint64_t startpa, endpa;
2557 
2558 		startpa = mcp->mc_start_address + mcp->mc_size;
2559 		endpa = ptob(physmax + 1);
2560 		if (endpa > startpa) {
2561 			mlist = mc_memlist_del_span(mlist, startpa,
2562 			    endpa - startpa);
2563 		}
2564 	}
2565 
2566 	if (mlist) {
2567 		mcp->mlist = mlist;
2568 	}
2569 }
2570 
2571 int
2572 mc_board_add(mc_opl_t *mcp)
2573 {
2574 	struct mc_addr_spec *macaddr;
2575 	cs_status_t *cs_status;
2576 	int len, len1, i, bk, cc;
2577 	mc_rsaddr_info_t rsaddr;
2578 	uint32_t mirr;
2579 	int nbanks = 0;
2580 	uint64_t nbytes = 0;
2581 	int mirror_mode = 0;
2582 	int ret;
2583 
2584 	/*
2585 	 * Get configurations from "pseudo-mc" node which includes:
2586 	 * board# : LSB number
2587 	 * mac-addr : physical base address of MAC registers
2588 	 * csX-mac-pa-trans-table: translation table from DIMM address
2589 	 *			to physical address or vice versa.
2590 	 */
2591 	mcp->mc_board_num = (int)ddi_getprop(DDI_DEV_T_ANY, mcp->mc_dip,
2592 	    DDI_PROP_DONTPASS, "board#", -1);
2593 
2594 	if (mcp->mc_board_num == -1) {
2595 		return (DDI_FAILURE);
2596 	}
2597 
2598 	/*
2599 	 * Get the start address of this CAB.  It is obtained from the
2600 	 * "sb-mem-ranges" property.
2601 	 */
2602 
2603 	if (get_base_address(mcp) == DDI_FAILURE) {
2604 		return (DDI_FAILURE);
2605 	}
2606 	/* get mac-pa trans tables */
2607 	for (i = 0; i < MC_TT_CS; i++) {
2608 		len = MC_TT_ENTRIES;
2609 		cc = ddi_getlongprop_buf(DDI_DEV_T_ANY, mcp->mc_dip,
2610 		    DDI_PROP_DONTPASS, mc_tbl_name[i],
2611 		    (caddr_t)mcp->mc_trans_table[i], &len);
2612 
2613 		if (cc != DDI_SUCCESS) {
2614 			bzero(mcp->mc_trans_table[i], MC_TT_ENTRIES);
2615 		}
2616 	}
2617 	mcp->mlist = NULL;
2618 
2619 	mc_get_mlist(mcp);
2620 
2621 	/* initialize bank information */
2622 	cc = ddi_getlongprop(DDI_DEV_T_ANY, mcp->mc_dip, DDI_PROP_DONTPASS,
2623 	    "mc-addr", (caddr_t)&macaddr, &len);
2624 	if (cc != DDI_SUCCESS) {
2625 		cmn_err(CE_WARN, "Cannot get mc-addr. err=%d\n", cc);
2626 		return (DDI_FAILURE);
2627 	}
2628 
2629 	cc = ddi_getlongprop(DDI_DEV_T_ANY, mcp->mc_dip, DDI_PROP_DONTPASS,
2630 	    "cs-status", (caddr_t)&cs_status, &len1);
2631 
2632 	if (cc != DDI_SUCCESS) {
2633 		if (len > 0)
2634 			kmem_free(macaddr, len);
2635 		cmn_err(CE_WARN, "Cannot get cs-status. err=%d\n", cc);
2636 		return (DDI_FAILURE);
2637 	}
2638 	/* get the physical board number for a given logical board number */
2639 	mcp->mc_phys_board_num = mc_opl_get_physical_board(mcp->mc_board_num);
2640 
2641 	if (mcp->mc_phys_board_num < 0) {
2642 		if (len > 0)
2643 			kmem_free(macaddr, len);
2644 		cmn_err(CE_WARN, "Unable to obtain the physical board number");
2645 		return (DDI_FAILURE);
2646 	}
2647 
2648 	mutex_init(&mcp->mc_lock, NULL, MUTEX_DRIVER, NULL);
2649 
2650 	for (i = 0; i < len1 / sizeof (cs_status_t); i++) {
2651 		nbytes += ((uint64_t)cs_status[i].cs_avail_hi << 32) |
2652 		    ((uint64_t)cs_status[i].cs_avail_low);
2653 	}
2654 	if (len1 > 0)
2655 		kmem_free(cs_status, len1);
2656 	nbanks = len / sizeof (struct mc_addr_spec);
2657 
2658 	if (nbanks > 0)
2659 		nbytes /= nbanks;
2660 	else {
2661 		/* No need to free macaddr because len must be 0 */
2662 		mcp->mc_status |= MC_MEMORYLESS;
2663 		return (DDI_SUCCESS);
2664 	}
2665 
2666 	for (i = 0; i < BANKNUM_PER_SB; i++) {
2667 		mcp->mc_scf_retry[i] = 0;
2668 		mcp->mc_period[i] = 0;
2669 		mcp->mc_speedup_period[i] = 0;
2670 	}
2671 
2672 	/*
2673 	 * Get the memory size here. Let it be B (bytes).
2674 	 * Let T be the time in u.s. to scan 64 bytes.
2675 	 * If we want to complete 1 round of scanning in P seconds, then:
2676 	 *
2677 	 *	B * T * 10^(-6)	= P
2678 	 *	---------------
2679 	 *		64
2680 	 *
2681 	 *	T = P * 64 * 10^6
2682 	 *	    -------------
2683 	 *		B
2684 	 *
2689 	 *	The timing bits are set in PTRL_CNTL[28:26] where
2690 	 *
2691 	 *	0	- 1 m.s
2692 	 *	1	- 512 u.s.
2693 	 *	10	- 256 u.s.
2694 	 *	11	- 128 u.s.
2695 	 *	100	- 64 u.s.
2696 	 *	101	- 32 u.s.
2697 	 *	110	- 0 u.s.
2698 	 *	111	- reserved.
2699 	 *
2700 	 *
2701 	 *	a[0] = 110, a[1] = 101, ... a[6] = 0
2702 	 *
2703 	 *	cs-status property is int x 7
2704 	 *	0 - cs#
2705 	 *	1 - cs-status
2706 	 *	2 - cs-avail.hi
2707 	 *	3 - cs-avail.lo
2708 	 *	4 - dimm-capa.hi
2709 	 *	5 - dimm-capa.lo
2710 	 *	6 - #of dimms
2711 	 */
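	/*
	 * For illustration only (mc_scan_period and the mc_scan_speeds[]
	 * table are defined elsewhere in this file): with about 32GB
	 * behind a MAC and a desired period P of one day (86400 s),
	 * T = 86400 * 64 * 10^6 / (32 * 2^30) is roughly 160 u.s., and
	 * the loop below then picks the table entry appropriate for
	 * that T.
	 */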
2712 
2713 	if (nbytes > 0) {
2714 		int i;
2715 		uint64_t ms;
2716 		ms = ((uint64_t)mc_scan_period * 64 * 1000000)/nbytes;
2717 		mcp->mc_speed = mc_scan_speeds[MC_MAX_SPEEDS - 1].mc_speeds;
2718 		for (i = 0; i < MC_MAX_SPEEDS - 1; i++) {
2719 			if (ms < mc_scan_speeds[i + 1].mc_period) {
2720 				mcp->mc_speed = mc_scan_speeds[i].mc_speeds;
2721 				break;
2722 			}
2723 		}
2724 	} else
2725 		mcp->mc_speed = 0;
2726 
2727 
2728 	for (i = 0; i < len / sizeof (struct mc_addr_spec); i++) {
2729 		struct mc_bank *bankp;
2730 		mc_retry_info_t *retry;
2731 		uint32_t reg;
2732 		int k;
2733 
2734 		/*
2735 		 * setup bank
2736 		 */
2737 		bk = macaddr[i].bank;
2738 		bankp = &(mcp->mc_bank[bk]);
2739 		bankp->mcb_status = BANK_INSTALLED;
2740 		bankp->mcb_reg_base = REGS_PA(macaddr, i);
2741 
2742 		bankp->mcb_retry_freelist = NULL;
2743 		bankp->mcb_retry_pending = NULL;
2744 		bankp->mcb_active = NULL;
2745 		retry = &bankp->mcb_retry_infos[0];
2746 		for (k = 0; k < MC_RETRY_COUNT; k++, retry++) {
2747 			mc_retry_info_put(&bankp->mcb_retry_freelist, retry);
2748 		}
2749 
2750 		reg = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bk));
2751 		bankp->mcb_ptrl_cntl = (reg & MAC_CNTL_PTRL_PRESERVE_BITS);
2752 
2753 		/*
2754 		 * check if mirror mode
2755 		 */
2756 		mirr = LD_MAC_REG(MAC_MIRR(mcp, bk));
2757 
2758 		if (mirr & MAC_MIRR_MIRROR_MODE) {
2759 			MC_LOG("Mirror -> /LSB%d/B%d\n", mcp->mc_board_num,
2760 			    bk);
2761 			bankp->mcb_status |= BANK_MIRROR_MODE;
2762 			mirror_mode = 1;
2763 			/*
2764 			 * The following bit is only used for
2765 			 * error injection.  We should clear it
2766 			 */
2767 			if (mirr & MAC_MIRR_BANK_EXCLUSIVE)
2768 				ST_MAC_REG(MAC_MIRR(mcp, bk), 0);
2769 		}
2770 
2771 		/*
2772 		 * restart if not mirror mode or the other bank
2773 		 * of the mirror is not running
2774 		 */
2775 		if (!(mirr & MAC_MIRR_MIRROR_MODE) ||
2776 		    !(mcp->mc_bank[bk^1].mcb_status & BANK_PTRL_RUNNING)) {
2777 			MC_LOG("Starting up /LSB%d/B%d\n", mcp->mc_board_num,
2778 			    bk);
2779 			get_ptrl_start_address(mcp, bk, &rsaddr.mi_restartaddr);
2780 			rsaddr.mi_valid = 0;
2781 			rsaddr.mi_injectrestart = 0;
2782 			(void) restart_patrol(mcp, bk, &rsaddr);
2783 		} else {
2784 			MC_LOG("Not starting up /LSB%d/B%d\n",
2785 			    mcp->mc_board_num, bk);
2786 		}
2787 		bankp->mcb_status |= BANK_PTRL_RUNNING;
2788 	}
2789 	if (len > 0)
2790 		kmem_free(macaddr, len);
2791 
2792 	ret = ndi_prop_update_int(DDI_DEV_T_NONE, mcp->mc_dip, "mirror-mode",
2793 	    mirror_mode);
2794 	if (ret != DDI_PROP_SUCCESS) {
2795 		cmn_err(CE_WARN, "Unable to update mirror-mode property");
2796 	}
2797 
2798 	mcp->mc_dimm_list = mc_get_dimm_list(mcp);
2799 
2800 	/*
2801 	 * set interval in HZ.
2802 	 */
2803 	mcp->mc_last_error = 0;
2804 
2805 	/* restart memory patrol checking */
2806 	mcp->mc_status |= MC_POLL_RUNNING;
2807 
2808 	return (DDI_SUCCESS);
2809 }
2810 
2811 int
2812 mc_board_del(mc_opl_t *mcp)
2813 {
2814 	int i;
2815 	scf_log_t *p;
2816 
2817 	/*
2818 	 * cleanup mac state
2819 	 */
2820 	mutex_enter(&mcp->mc_lock);
2821 	if (mcp->mc_status & MC_MEMORYLESS) {
2822 		mutex_exit(&mcp->mc_lock);
2823 		mutex_destroy(&mcp->mc_lock);
2824 		return (DDI_SUCCESS);
2825 	}
2826 	for (i = 0; i < BANKNUM_PER_SB; i++) {
2827 		if (mcp->mc_bank[i].mcb_status & BANK_INSTALLED) {
2828 			mcp->mc_bank[i].mcb_status &= ~BANK_INSTALLED;
2829 		}
2830 	}
2831 
2832 	/* stop memory patrol checking */
2833 	mcp->mc_status &= ~MC_POLL_RUNNING;
2834 
2835 	/* just throw away all the scf logs */
2836 	for (i = 0; i < BANKNUM_PER_SB; i++) {
2837 		while ((p = mcp->mc_scf_log[i]) != NULL) {
2838 			mcp->mc_scf_log[i] = p->sl_next;
2839 			mcp->mc_scf_total[i]--;
2840 			kmem_free(p, sizeof (scf_log_t));
2841 		}
2842 	}
2843 
2844 	if (mcp->mlist)
2845 		mc_memlist_delete(mcp->mlist);
2846 
2847 	if (mcp->mc_dimm_list)
2848 		mc_free_dimm_list(mcp->mc_dimm_list);
2849 
2850 	mutex_exit(&mcp->mc_lock);
2851 
2852 	mutex_destroy(&mcp->mc_lock);
2853 	return (DDI_SUCCESS);
2854 }
2855 
2856 int
2857 mc_suspend(mc_opl_t *mcp, uint32_t flag)
2858 {
2859 	/* stop memory patrol checking */
2860 	mutex_enter(&mcp->mc_lock);
2861 	if (mcp->mc_status & MC_MEMORYLESS) {
2862 		mutex_exit(&mcp->mc_lock);
2863 		return (DDI_SUCCESS);
2864 	}
2865 
2866 	mcp->mc_status &= ~MC_POLL_RUNNING;
2867 
2868 	mcp->mc_status |= flag;
2869 	mutex_exit(&mcp->mc_lock);
2870 
2871 	return (DDI_SUCCESS);
2872 }
2873 
2874 void
2875 opl_mc_update_mlist(void)
2876 {
2877 	int i;
2878 	mc_opl_t *mcp;
2879 
2880 	/*
2881 	 * memory information is not updated until
2882 	 * the post attach/detach stage during DR.
2883 	 * This interface is used by dr_mem to inform
2884 	 * mc-opl to update the mlist.
2885 	 */
2886 
2887 	mutex_enter(&mcmutex);
2888 	for (i = 0; i < OPL_MAX_BOARDS; i++) {
2889 		if ((mcp = mc_instances[i]) == NULL)
2890 			continue;
2891 		mutex_enter(&mcp->mc_lock);
2892 		if (mcp->mlist)
2893 			mc_memlist_delete(mcp->mlist);
2894 		mcp->mlist = NULL;
2895 		mc_get_mlist(mcp);
2896 		mutex_exit(&mcp->mc_lock);
2897 	}
2898 	mutex_exit(&mcmutex);
2899 }
2900 
2901 /* caller must clear the SUSPEND bits or this will do nothing */
2902 
2903 int
2904 mc_resume(mc_opl_t *mcp, uint32_t flag)
2905 {
2906 	int i;
2907 	uint64_t basepa;
2908 
2909 	mutex_enter(&mcp->mc_lock);
2910 	if (mcp->mc_status & MC_MEMORYLESS) {
2911 		mutex_exit(&mcp->mc_lock);
2912 		return (DDI_SUCCESS);
2913 	}
2914 	basepa = mcp->mc_start_address;
2915 	if (get_base_address(mcp) == DDI_FAILURE) {
2916 		mutex_exit(&mcp->mc_lock);
2917 		return (DDI_FAILURE);
2918 	}
2919 
2920 	if (basepa != mcp->mc_start_address) {
2921 		if (mcp->mlist)
2922 			mc_memlist_delete(mcp->mlist);
2923 		mcp->mlist = NULL;
2924 		mc_get_mlist(mcp);
2925 	}
2926 
2927 	mcp->mc_status &= ~flag;
2928 
2929 	if (mcp->mc_status & (MC_SOFT_SUSPENDED | MC_DRIVER_SUSPENDED)) {
2930 		mutex_exit(&mcp->mc_lock);
2931 		return (DDI_SUCCESS);
2932 	}
2933 
2934 	if (!(mcp->mc_status & MC_POLL_RUNNING)) {
2935 		/* restart memory patrol checking */
2936 		mcp->mc_status |= MC_POLL_RUNNING;
2937 		for (i = 0; i < BANKNUM_PER_SB; i++) {
2938 			if (mcp->mc_bank[i].mcb_status & BANK_INSTALLED) {
2939 				mc_check_errors_func(mcp);
2940 			}
2941 		}
2942 	}
2943 	mutex_exit(&mcp->mc_lock);
2944 
2945 	return (DDI_SUCCESS);
2946 }
2947 
2948 static mc_opl_t *
2949 mc_pa_to_mcp(uint64_t pa)
2950 {
2951 	mc_opl_t *mcp;
2952 	int i;
2953 
2954 	ASSERT(MUTEX_HELD(&mcmutex));
2955 	for (i = 0; i < OPL_MAX_BOARDS; i++) {
2956 		if ((mcp = mc_instances[i]) == NULL)
2957 			continue;
2958 		/* if mac patrol is suspended, we cannot rely on it */
2959 		if (!(mcp->mc_status & MC_POLL_RUNNING) ||
2960 		    (mcp->mc_status & MC_SOFT_SUSPENDED))
2961 			continue;
2962 		if (mc_rangecheck_pa(mcp, pa)) {
2963 			return (mcp);
2964 		}
2965 	}
2966 	return (NULL);
2967 }
2968 
2969 /*
2970  * Get Physical Board number from Logical one.
2971  */
2972 static int
2973 mc_opl_get_physical_board(int sb)
2974 {
2975 	if (&opl_get_physical_board) {
2976 		return (opl_get_physical_board(sb));
2977 	}
2978 
2979 	cmn_err(CE_NOTE, "!opl_get_physical_board() not loaded\n");
2980 	return (-1);
2981 }
2982 
2983 /* ARGSUSED */
2984 int
2985 mc_get_mem_unum(int synd_code, uint64_t flt_addr, char *buf, int buflen,
2986 	int *lenp)
2987 {
2988 	int i;
2989 	int j;
2990 	int sb;
2991 	int bank;
2992 	int cs;
2993 	int rv = 0;
2994 	mc_opl_t *mcp;
2995 	char memb_num;
2996 
2997 	mutex_enter(&mcmutex);
2998 
2999 	if (((mcp = mc_pa_to_mcp(flt_addr)) == NULL) ||
3000 	    (!pa_is_valid(mcp, flt_addr))) {
3001 		mutex_exit(&mcmutex);
3002 		if (snprintf(buf, buflen, "UNKNOWN") >= buflen) {
3003 			return (ENOSPC);
3004 		} else {
3005 			if (lenp)
3006 				*lenp = strlen(buf);
3007 		}
3008 		return (0);
3009 	}
3010 
3011 	bank = pa_to_bank(mcp, flt_addr - mcp->mc_start_address);
3012 	sb = mcp->mc_phys_board_num;
3013 	cs = pa_to_cs(mcp, flt_addr - mcp->mc_start_address);
3014 
3015 	if (sb == -1) {
3016 		mutex_exit(&mcmutex);
3017 		return (ENXIO);
3018 	}
3019 
3020 	switch (plat_model) {
3021 	case MODEL_DC:
3022 		i = BD_BK_SLOT_TO_INDEX(0, bank, 0);
3023 		j = (cs == 0) ? i : i + 2;
3024 		(void) snprintf(buf, buflen, "/%s%02d/MEM%s MEM%s",
3025 		    model_names[plat_model].unit_name, sb,
3026 		    mc_dc_dimm_unum_table[j],
3027 		    mc_dc_dimm_unum_table[j + 1]);
3028 		break;
3029 	case MODEL_FF2:
3030 	case MODEL_FF1:
3031 		i = BD_BK_SLOT_TO_INDEX(sb, bank, 0);
3032 		j = (cs == 0) ? i : i + 2;
3033 		memb_num = mc_ff_dimm_unum_table[i][0];
3034 		(void) snprintf(buf, buflen, "/%s/%s%c/MEM%s MEM%s",
3035 		    model_names[plat_model].unit_name,
3036 		    model_names[plat_model].mem_name, memb_num,
3037 		    &mc_ff_dimm_unum_table[j][1],
3038 		    &mc_ff_dimm_unum_table[j + 1][1]);
3039 		break;
3040 	case MODEL_IKKAKU:
3041 		i = BD_BK_SLOT_TO_INDEX(sb, bank, 0);
3042 		j = (cs == 0) ? i : i + 2;
3043 		(void) snprintf(buf, buflen, "/%s/MEM%s MEM%s",
3044 		    model_names[plat_model].unit_name,
3045 		    &mc_ff_dimm_unum_table[j][1],
3046 		    &mc_ff_dimm_unum_table[j + 1][1]);
3047 		break;
3048 	default:
3049 		rv = ENXIO;
3050 	}
3051 	if (lenp) {
3052 		*lenp = strlen(buf);
3053 	}
3054 	mutex_exit(&mcmutex);
3055 	return (rv);
3056 }
3057 
3058 int
3059 opl_mc_suspend(void)
3060 {
3061 	mc_opl_t *mcp;
3062 	int i;
3063 
3064 	mutex_enter(&mcmutex);
3065 	for (i = 0; i < OPL_MAX_BOARDS; i++) {
3066 		if ((mcp = mc_instances[i]) == NULL)
3067 			continue;
3068 		(void) mc_suspend(mcp, MC_SOFT_SUSPENDED);
3069 	}
3070 	mutex_exit(&mcmutex);
3071 
3072 	return (0);
3073 }
3074 
3075 int
3076 opl_mc_resume(void)
3077 {
3078 	mc_opl_t *mcp;
3079 	int i;
3080 
3081 	mutex_enter(&mcmutex);
3082 	for (i = 0; i < OPL_MAX_BOARDS; i++) {
3083 		if ((mcp = mc_instances[i]) == NULL)
3084 			continue;
3085 		(void) mc_resume(mcp, MC_SOFT_SUSPENDED);
3086 	}
3087 	mutex_exit(&mcmutex);
3088 
3089 	return (0);
3090 }
3091 static void
3092 insert_mcp(mc_opl_t *mcp)
3093 {
3094 	mutex_enter(&mcmutex);
3095 	if (mc_instances[mcp->mc_board_num] != NULL) {
3096 		MC_LOG("mc-opl instance for board# %d already exists\n",
3097 		    mcp->mc_board_num);
3098 	}
3099 	mc_instances[mcp->mc_board_num] = mcp;
3100 	mutex_exit(&mcmutex);
3101 }
3102 
3103 static void
3104 delete_mcp(mc_opl_t *mcp)
3105 {
3106 	mutex_enter(&mcmutex);
3107 	mc_instances[mcp->mc_board_num] = NULL;
3108 	mutex_exit(&mcmutex);
3109 }
3110 
3111 /* Error injection interface */
3112 
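/*
 * mc_lock_va() flushes any existing mapping for the injection target and
 * loads a locked 8K data-TLB entry (TTE_LCK_INT) for it, so that the
 * strong prefetch issued by mc_prefetch() targets the intended physical
 * address; mc_unlock_va() simply flushes that entry again.
 */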
3113 static void
3114 mc_lock_va(uint64_t pa, caddr_t new_va)
3115 {
3116 	tte_t tte;
3117 
3118 	vtag_flushpage(new_va, (uint64_t)ksfmmup);
3119 	sfmmu_memtte(&tte, pa >> PAGESHIFT, PROC_DATA|HAT_NOSYNC, TTE8K);
3120 	tte.tte_intlo |= TTE_LCK_INT;
3121 	sfmmu_dtlb_ld_kva(new_va, &tte);
3122 }
3123 
3124 static void
3125 mc_unlock_va(caddr_t va)
3126 {
3127 	vtag_flushpage(va, (uint64_t)ksfmmup);
3128 }
3129 
3130 /* ARGSUSED */
3131 int
3132 mc_inject_error(int error_type, uint64_t pa, uint32_t flags)
3133 {
3134 	mc_opl_t *mcp;
3135 	int bank;
3136 	uint32_t dimm_addr;
3137 	uint32_t cntl;
3138 	mc_rsaddr_info_t rsaddr;
3139 	uint32_t data, stat;
3140 	int both_sides = 0;
3141 	uint64_t pa0;
3142 	int extra_injection_needed = 0;
3143 	extern void cpu_flush_ecache(void);
3144 
3145 	MC_LOG("HW mc_inject_error(%x, %lx, %x)\n", error_type, pa, flags);
3146 
3147 	mutex_enter(&mcmutex);
3148 	if ((mcp = mc_pa_to_mcp(pa)) == NULL) {
3149 		mutex_exit(&mcmutex);
3150 		MC_LOG("mc_inject_error: invalid pa\n");
3151 		return (ENOTSUP);
3152 	}
3153 
3154 	mutex_enter(&mcp->mc_lock);
3155 	mutex_exit(&mcmutex);
3156 
3157 	if (mcp->mc_status & (MC_SOFT_SUSPENDED | MC_DRIVER_SUSPENDED)) {
3158 		mutex_exit(&mcp->mc_lock);
3159 		MC_LOG("mc-opl has been suspended.  No error injection.\n");
3160 		return (EBUSY);
3161 	}
3162 
3163 	/* convert pa to offset within the board */
3164 	MC_LOG("pa %lx, offset %lx\n", pa, pa - mcp->mc_start_address);
3165 
3166 	if (!pa_is_valid(mcp, pa)) {
3167 		mutex_exit(&mcp->mc_lock);
3168 		return (EINVAL);
3169 	}
3170 
3171 	pa0 = pa - mcp->mc_start_address;
3172 
3173 	bank = pa_to_bank(mcp, pa0);
3174 
3175 	if (flags & MC_INJECT_FLAG_OTHER)
3176 		bank = bank ^ 1;
3177 
3178 	if (MC_INJECT_MIRROR(error_type) && !IS_MIRROR(mcp, bank)) {
3179 		mutex_exit(&mcp->mc_lock);
3180 		MC_LOG("Not mirror mode\n");
3181 		return (EINVAL);
3182 	}
3183 
3184 	dimm_addr = pa_to_dimm(mcp, pa0);
3185 
3186 	MC_LOG("injecting error to /LSB%d/B%d/%x\n", mcp->mc_board_num, bank,
3187 	    dimm_addr);
3188 
3189 
3190 	switch (error_type) {
3191 	case MC_INJECT_INTERMITTENT_MCE:
3192 	case MC_INJECT_PERMANENT_MCE:
3193 	case MC_INJECT_MUE:
3194 		both_sides = 1;
3195 	}
3196 
3197 	if (flags & MC_INJECT_FLAG_RESET)
3198 		ST_MAC_REG(MAC_EG_CNTL(mcp, bank), 0);
3199 
3200 	ST_MAC_REG(MAC_EG_ADD(mcp, bank), dimm_addr & MAC_EG_ADD_MASK);
3201 
3202 	if (both_sides) {
3203 		ST_MAC_REG(MAC_EG_CNTL(mcp, bank^1), 0);
3204 		ST_MAC_REG(MAC_EG_ADD(mcp, bank^1), dimm_addr &
3205 		    MAC_EG_ADD_MASK);
3206 	}
3207 
3208 	switch (error_type) {
3209 	case MC_INJECT_SUE:
3210 		extra_injection_needed = 1;
3211 		/*FALLTHROUGH*/
3212 	case MC_INJECT_UE:
3213 	case MC_INJECT_MUE:
3214 		if (flags & MC_INJECT_FLAG_PATH) {
3215 			cntl = MAC_EG_ADD_FIX | MAC_EG_FORCE_READ00 |
3216 			    MAC_EG_FORCE_READ16 | MAC_EG_RDERR_ONCE;
3217 		} else {
3218 			cntl = MAC_EG_ADD_FIX | MAC_EG_FORCE_DERR00 |
3219 			    MAC_EG_FORCE_DERR16 | MAC_EG_DERR_ONCE;
3220 		}
3221 		flags |= MC_INJECT_FLAG_ST;
3222 		break;
3223 	case MC_INJECT_INTERMITTENT_CE:
3224 	case MC_INJECT_INTERMITTENT_MCE:
3225 		if (flags & MC_INJECT_FLAG_PATH) {
3226 			cntl = MAC_EG_ADD_FIX |MAC_EG_FORCE_READ00 |
3227 			    MAC_EG_RDERR_ONCE;
3228 		} else {
3229 			cntl = MAC_EG_ADD_FIX | MAC_EG_FORCE_DERR16 |
3230 			    MAC_EG_DERR_ONCE;
3231 		}
3232 		extra_injection_needed = 1;
3233 		flags |= MC_INJECT_FLAG_ST;
3234 		break;
3235 	case MC_INJECT_PERMANENT_CE:
3236 	case MC_INJECT_PERMANENT_MCE:
3237 		if (flags & MC_INJECT_FLAG_PATH) {
3238 			cntl = MAC_EG_ADD_FIX | MAC_EG_FORCE_READ00 |
3239 			    MAC_EG_RDERR_ALWAYS;
3240 		} else {
3241 			cntl = MAC_EG_ADD_FIX | MAC_EG_FORCE_DERR16 |
3242 			    MAC_EG_DERR_ALWAYS;
3243 		}
3244 		flags |= MC_INJECT_FLAG_ST;
3245 		break;
3246 	case MC_INJECT_CMPE:
3247 		data = 0xabcdefab;
3248 		stphys(pa, data);
3249 		cpu_flush_ecache();
3250 		MC_LOG("CMPE: writing data %x to %lx\n", data, pa);
3251 		ST_MAC_REG(MAC_MIRR(mcp, bank), MAC_MIRR_BANK_EXCLUSIVE);
3252 		stphys(pa, data ^ 0xffffffff);
3253 		membar_sync();
3254 		cpu_flush_ecache();
3255 		ST_MAC_REG(MAC_MIRR(mcp, bank), 0);
3256 		MC_LOG("CMPE: write new data %x to %lx\n", data, pa);
3257 		cntl = 0;
3258 		break;
3259 	case MC_INJECT_NOP:
3260 		cntl = 0;
3261 		break;
3262 	default:
3263 		MC_LOG("mc_inject_error: invalid option\n");
3264 		cntl = 0;
3265 	}
3266 
3267 	if (cntl) {
3268 		ST_MAC_REG(MAC_EG_CNTL(mcp, bank), cntl & MAC_EG_SETUP_MASK);
3269 		ST_MAC_REG(MAC_EG_CNTL(mcp, bank), cntl);
3270 
3271 		if (both_sides) {
3272 			ST_MAC_REG(MAC_EG_CNTL(mcp, bank^1), cntl &
3273 			    MAC_EG_SETUP_MASK);
3274 			ST_MAC_REG(MAC_EG_CNTL(mcp, bank^1), cntl);
3275 		}
3276 	}
3277 
3278 	/*
3279 	 * For all injection cases except compare error, we
3280 	 * must write to the PA to trigger the error.
3281 	 */
3282 
3283 	if (flags & MC_INJECT_FLAG_ST) {
3284 		data = 0xf0e0d0c0;
3285 		MC_LOG("Writing %x to %lx\n", data, pa);
3286 		stphys(pa, data);
3287 		cpu_flush_ecache();
3288 	}
3289 
3290 
3291 	if (flags & MC_INJECT_FLAG_LD) {
3292 		if (flags & MC_INJECT_FLAG_PREFETCH) {
3293 			/*
3294 			 * Use strong prefetch operation to
3295 			 * inject MI errors.
3296 			 */
3297 			page_t *pp;
3298 			extern void mc_prefetch(caddr_t);
3299 
3300 			MC_LOG("prefetch\n");
3301 
3302 			pp = page_numtopp_nolock(pa >> PAGESHIFT);
3303 			if (pp != NULL) {
3304 				caddr_t	va, va1;
3305 
3306 				va = ppmapin(pp, PROT_READ|PROT_WRITE,
3307 				    (caddr_t)-1);
3308 				kpreempt_disable();
3309 				mc_lock_va((uint64_t)pa, va);
3310 				va1 = va + (pa & (PAGESIZE - 1));
3311 				mc_prefetch(va1);
3312 				mc_unlock_va(va);
3313 				kpreempt_enable();
3314 				ppmapout(va);
3315 
3316 				/*
3317 				 * For MI errors, we need one extra
3318 				 * injection for HW patrol to stop.
3319 				 */
3320 				extra_injection_needed = 1;
3321 			} else {
3322 				cmn_err(CE_WARN, "Cannot find page structure"
3323 				    " for PA %lx\n", pa);
3324 			}
3325 		} else {
3326 			MC_LOG("Reading from %lx\n", pa);
3327 			data = ldphys(pa);
3328 			MC_LOG("data = %x\n", data);
3329 		}
3330 
3331 		if (extra_injection_needed) {
3332 			/*
3333 			 * These are the injection cases where the
3334 			 * requested injected errors will not cause the HW
3335 			 * patrol to stop. For these cases, we need to inject
3336 			 * an extra 'real' PTRL error to force the
3337 			 * HW patrol to stop so that we can report the
3338 			 * errors injected. Note that we cannot read
3339 			 * and report error status while the HW patrol
3340 			 * is running.
3341 			 */
3342 			ST_MAC_REG(MAC_EG_CNTL(mcp, bank),
3343 			    cntl & MAC_EG_SETUP_MASK);
3344 			ST_MAC_REG(MAC_EG_CNTL(mcp, bank), cntl);
3345 
3346 			if (both_sides) {
3347 				ST_MAC_REG(MAC_EG_CNTL(mcp, bank^1), cntl &
3348 				    MAC_EG_SETUP_MASK);
3349 				ST_MAC_REG(MAC_EG_CNTL(mcp, bank^1), cntl);
3350 			}
3351 			data = 0xf0e0d0c0;
3352 			MC_LOG("Writing %x to %lx\n", data, pa);
3353 			stphys(pa, data);
3354 			cpu_flush_ecache();
3355 		}
3356 	}
3357 
3358 	if (flags & MC_INJECT_FLAG_RESTART) {
3359 		MC_LOG("Restart patrol\n");
3360 		rsaddr.mi_restartaddr.ma_bd = mcp->mc_board_num;
3361 		rsaddr.mi_restartaddr.ma_bank = bank;
3362 		rsaddr.mi_restartaddr.ma_dimm_addr = dimm_addr;
3363 		rsaddr.mi_valid = 1;
3364 		rsaddr.mi_injectrestart = 1;
3365 		(void) restart_patrol(mcp, bank, &rsaddr);
3366 	}
3367 
3368 	if (flags & MC_INJECT_FLAG_POLL) {
3369 		int running;
3370 		int ebank = (IS_MIRROR(mcp, bank)) ? MIRROR_IDX(bank) : bank;
3371 
3372 		MC_LOG("Poll patrol error\n");
3373 		stat = LD_MAC_REG(MAC_PTRL_STAT(mcp, bank));
3374 		cntl = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank));
3375 		running = cntl & MAC_CNTL_PTRL_START;
3376 
3377 		if (!running &&
3378 		    (stat & (MAC_STAT_PTRL_ERRS|MAC_STAT_MI_ERRS))) {
3379 			/*
3380 			 * HW patrol stopped and we have errors to
3381 			 * report. Do it.
3382 			 */
3383 			mcp->mc_speedup_period[ebank] = 0;
3384 			rsaddr.mi_valid = 0;
3385 			rsaddr.mi_injectrestart = 0;
3386 			if (IS_MIRROR(mcp, bank)) {
3387 				mc_error_handler_mir(mcp, bank, &rsaddr);
3388 			} else {
3389 				mc_error_handler(mcp, bank, &rsaddr);
3390 			}
3391 
3392 			(void) restart_patrol(mcp, bank, &rsaddr);
3393 		} else {
3394 			/*
3395 			 * We are expecting to report injected
3396 			 * errors but the HW patrol is still running.
3397 			 * Speed up the scanning
3398 			 */
3399 			mcp->mc_speedup_period[ebank] = 2;
3400 			MAC_CMD(mcp, bank, 0);
3401 			(void) restart_patrol(mcp, bank, NULL);
3402 		}
3403 	}
3404 
3405 	mutex_exit(&mcp->mc_lock);
3406 	return (0);
3407 }
3408 
3409 void
3410 mc_stphysio(uint64_t pa, uint32_t data)
3411 {
3412 	MC_LOG("0x%x -> pa(%lx)\n", data, pa);
3413 	stphysio(pa, data);
3414 
3415 	/* force the above write to be processed by mac patrol */
3416 	data = ldphysio(pa);
3417 	MC_LOG("pa(%lx) = 0x%x\n", pa, data);
3418 }
3419 
3420 uint32_t
3421 mc_ldphysio(uint64_t pa)
3422 {
3423 	uint32_t rv;
3424 
3425 	rv = ldphysio(pa);
3426 	MC_LOG("pa(%lx) = 0x%x\n", pa, rv);
3427 	return (rv);
3428 }
3429 
3430 #define	isdigit(ch)	((ch) >= '0' && (ch) <= '9')
3431 
3432 /*
3433  * parse_unum_memory -- extract the board number and the DIMM name from
3434  * the unum.
3435  *
3436  * Return 0 for success and non-zero for a failure.
3437  */
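/*
 * For example (unum formats per mc_get_mem_unum() above): a DC-class
 * unum containing "CMU03" and "MEM12A" parses to *board = 3 and
 * dname = "12A"; an FF-class unum containing "MBU_A", "MEMB5" and
 * "MEM2B" parses to *board = 1 and dname = "52B".
 */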
3438 int
3439 parse_unum_memory(char *unum, int *board, char *dname)
3440 {
3441 	char *c;
3442 	char x, y, z;
3443 
3444 	if ((c = strstr(unum, "CMU")) != NULL) {
3445 		/* DC Model */
3446 		c += 3;
3447 		*board = (uint8_t)stoi(&c);
3448 		if ((c = strstr(c, "MEM")) == NULL) {
3449 			return (1);
3450 		}
3451 		c += 3;
3452 		if (strlen(c) < 3) {
3453 			return (2);
3454 		}
3455 		if ((!isdigit(c[0])) || (!(isdigit(c[1]))) ||
3456 		    ((c[2] != 'A') && (c[2] != 'B'))) {
3457 			return (3);
3458 		}
3459 		x = c[0];
3460 		y = c[1];
3461 		z = c[2];
3462 	} else if ((c = strstr(unum, "MBU_")) != NULL) {
3463 		/*  FF1/FF2/Ikkaku Model */
3464 		c += 4;
3465 		if ((c[0] != 'A') && (c[0] != 'B')) {
3466 			return (4);
3467 		}
3468 		if (plat_model == MODEL_IKKAKU) {
3469 			/* Ikkaku Model */
3470 			x = '0';
3471 			*board = 0;
3472 		} else {
3473 			/* FF1/FF2 Model */
3474 			if ((c = strstr(c, "MEMB")) == NULL) {
3475 				return (5);
3476 			}
3477 			c += 4;
3478 
3479 			x = c[0];
3480 			*board =  ((uint8_t)stoi(&c)) / 4;
3481 		}
3482 
3483 		if ((c = strstr(c, "MEM")) == NULL) {
3484 			return (6);
3485 		}
3486 		c += 3;
3487 		if (strlen(c) < 2) {
3488 			return (7);
3489 		}
3490 		if ((!isdigit(c[0])) || ((c[1] != 'A') && (c[1] != 'B'))) {
3491 			return (8);
3492 		}
3493 		y = c[0];
3494 		z = c[1];
3495 	} else {
3496 		return (9);
3497 	}
3498 	if (*board < 0) {
3499 		return (10);
3500 	}
3501 	dname[0] = x;
3502 	dname[1] = y;
3503 	dname[2] = z;
3504 	dname[3] = '\0';
3505 	return (0);
3506 }
3507 
3508 /*
3509  * mc_get_mem_sid_dimm -- Get the serial-ID for a given board and
3510  * the DIMM name.
3511  */
3512 int
3513 mc_get_mem_sid_dimm(mc_opl_t *mcp, char *dname, char *buf,
3514     int buflen, int *lenp)
3515 {
3516 	int		ret = ENODEV;
3517 	mc_dimm_info_t	*d = NULL;
3518 
3519 	if ((d = mcp->mc_dimm_list) == NULL) {
3520 		MC_LOG("mc_get_mem_sid_dimm: mc_dimm_list is NULL\n");
3521 		return (EINVAL);
3522 	}
3523 
3524 	for (; d != NULL; d = d->md_next) {
3525 		if (strcmp(d->md_dimmname, dname) == 0) {
3526 			break;
3527 		}
3528 	}
3529 	if (d != NULL) {
3530 		*lenp = strlen(d->md_serial) + strlen(d->md_partnum);
3531 		if (buflen <=  *lenp) {
3532 			cmn_err(CE_WARN, "mc_get_mem_sid_dimm: "
3533 			    "buflen is smaller than %d\n", *lenp);
3534 			ret = ENOSPC;
3535 		} else {
3536 			(void) snprintf(buf, buflen, "%s:%s",
3537 			    d->md_serial, d->md_partnum);
3538 			ret = 0;
3539 		}
3540 	}
3541 	MC_LOG("mc_get_mem_sid_dimm: Ret=%d Name=%s Serial-ID=%s\n",
3542 	    ret, dname, (ret == 0) ? buf : "");
3543 	return (ret);
3544 }
3545 
3546 int
3547 mc_set_mem_sid(mc_opl_t *mcp, char *buf, int buflen, int sb,
3548     int bank, uint32_t mf_type, uint32_t d_slot)
3549 {
3550 	int	lenp = buflen;
3551 	int	id;
3552 	int	ret;
3553 	char	*dimmnm;
3554 
3555 	if (mf_type == FLT_TYPE_INTERMITTENT_CE ||
3556 	    mf_type == FLT_TYPE_PERMANENT_CE) {
3557 		if (plat_model == MODEL_DC) {
3558 			/*
3559 			 * All DC models
3560 			 */
3561 			id = BD_BK_SLOT_TO_INDEX(0, bank, d_slot);
3562 			dimmnm = mc_dc_dimm_unum_table[id];
3563 		} else {
3564 			/*
3565 			 * All FF and Ikkaku models
3566 			 */
3567 			id = BD_BK_SLOT_TO_INDEX(sb, bank, d_slot);
3568 			dimmnm = mc_ff_dimm_unum_table[id];
3569 		}
3570 		if ((ret = mc_get_mem_sid_dimm(mcp, dimmnm, buf, buflen,
3571 		    &lenp)) != 0) {
3572 			return (ret);
3573 		}
3574 	} else {
3575 		return (1);
3576 	}
3577 
3578 	return (0);
3579 }
3580 
3581 /*
3582  * mc_get_mem_sid -- get the DIMM serial-ID corresponding to the unum.
3583  */
3584 int
3585 mc_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
3586 {
3587 	int	i;
3588 	int	ret = ENODEV;
3589 	int	board;
3590 	char	dname[MCOPL_MAX_DIMMNAME + 1];
3591 	mc_opl_t *mcp;
3592 
3593 	MC_LOG("mc_get_mem_sid: unum=%s buflen=%d\n", unum, buflen);
3594 	if ((ret = parse_unum_memory(unum, &board, dname)) != 0) {
3595 		MC_LOG("mc_get_mem_sid: unum(%s) parsing failed ret=%d\n",
3596 		    unum, ret);
3597 		return (EINVAL);
3598 	}
3599 
3600 	if (board < 0) {
3601 		MC_LOG("mc_get_mem_sid: Invalid board=%d dimm=%s\n",
3602 		    board, dname);
3603 		return (EINVAL);
3604 	}
3605 
3606 	mutex_enter(&mcmutex);
3607 	/*
3608 	 * return ENOENT if we cannot find a matching board.
3609 	 */
3610 	ret = ENOENT;
3611 	for (i = 0; i < OPL_MAX_BOARDS; i++) {
3612 		if ((mcp = mc_instances[i]) == NULL)
3613 			continue;
3614 		mutex_enter(&mcp->mc_lock);
3615 		if (mcp->mc_phys_board_num != board) {
3616 			mutex_exit(&mcp->mc_lock);
3617 			continue;
3618 		}
3619 		ret = mc_get_mem_sid_dimm(mcp, dname, buf, buflen, lenp);
3620 		if (ret == 0) {
3621 			mutex_exit(&mcp->mc_lock);
3622 			break;
3623 		}
3624 		mutex_exit(&mcp->mc_lock);
3625 	}
3626 	mutex_exit(&mcmutex);
3627 	return (ret);
3628 }
3629 
3630 /*
3631  * mc_get_mem_offset -- get the offset in a DIMM for a given physical address.
3632  */
3633 int
3634 mc_get_mem_offset(uint64_t paddr, uint64_t *offp)
3635 {
3636 	int		i;
3637 	int		ret = ENODEV;
3638 	mc_addr_t	maddr;
3639 	mc_opl_t	*mcp;
3640 
3641 	mutex_enter(&mcmutex);
3642 	for (i = 0; ((i < OPL_MAX_BOARDS) && (ret != 0)); i++) {
3643 		if ((mcp = mc_instances[i]) == NULL)
3644 			continue;
3645 		mutex_enter(&mcp->mc_lock);
3646 		if (!pa_is_valid(mcp, paddr)) {
3647 			mutex_exit(&mcp->mc_lock);
3648 			continue;
3649 		}
3650 		if (pa_to_maddr(mcp, paddr, &maddr) == 0) {
3651 			*offp = maddr.ma_dimm_addr;
3652 			ret = 0;
3653 		}
3654 		mutex_exit(&mcp->mc_lock);
3655 	}
3656 	mutex_exit(&mcmutex);
3657 	MC_LOG("mc_get_mem_offset: Ret=%d paddr=0x%lx offset=0x%lx\n",
3658 	    ret, paddr, *offp);
3659 	return (ret);
3660 }
3661 
3662 /*
3663  * dname_to_bankslot - Get the bank and slot number from the DIMM name.
3664  */
3665 int
3666 dname_to_bankslot(char *dname, int *bank, int *slot)
3667 {
3668 	int i;
3669 	int tsz;
3670 	char **tbl;
3671 
3672 	if (plat_model == MODEL_DC) {
3673 		/*
3674 		 * All DC models
3675 		 */
3676 		tbl = mc_dc_dimm_unum_table;
3677 		tsz = OPL_MAX_DIMMS;
3678 	} else {
3679 		/*
3680 		 * All FF and Ikkaku models
3681 		 */
3682 		tbl = mc_ff_dimm_unum_table;
3683 		tsz = 2 * OPL_MAX_DIMMS;
3684 	}
3685 
3686 	for (i = 0; i < tsz; i++) {
3687 		if (strcmp(dname,  tbl[i]) == 0) {
3688 			break;
3689 		}
3690 	}
3691 	if (i == tsz) {
3692 		return (1);
3693 	}
3694 	*bank = INDEX_TO_BANK(i);
3695 	*slot = INDEX_TO_SLOT(i);
3696 	return (0);
3697 }
3698 
3699 /*
3700  * mc_get_mem_addr -- get the physical address of a DIMM corresponding
3701  * to the unum and sid.
3702  */
3703 int
3704 mc_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *paddr)
3705 {
3706 	int	board;
3707 	int	bank;
3708 	int	slot;
3709 	int	i;
3710 	int	ret = ENODEV;
3711 	char	dname[MCOPL_MAX_DIMMNAME + 1];
3712 	mc_addr_t maddr;
3713 	mc_opl_t *mcp;
3714 
3715 	MC_LOG("mc_get_mem_addr: unum=%s sid=%s offset=0x%lx\n",
3716 	    unum, sid, offset);
3717 	if (parse_unum_memory(unum, &board, dname) != 0) {
3718 		MC_LOG("mc_get_mem_addr: unum(%s) parsing failed ret=%d\n",
3719 		    unum, ret);
3720 		return (EINVAL);
3721 	}
3722 
3723 	if (board < 0) {
3724 		MC_LOG("mc_get_mem_addr: Invalid board=%d dimm=%s\n",
3725 		    board, dname);
3726 		return (EINVAL);
3727 	}
3728 
3729 	mutex_enter(&mcmutex);
3730 	for (i = 0; i < OPL_MAX_BOARDS; i++) {
3731 		if ((mcp = mc_instances[i]) == NULL)
3732 			continue;
3733 		mutex_enter(&mcp->mc_lock);
3734 		if (mcp->mc_phys_board_num != board) {
3735 			mutex_exit(&mcp->mc_lock);
3736 			continue;
3737 		}
3738 
3739 		ret = dname_to_bankslot(dname, &bank, &slot);
3740 		MC_LOG("mc_get_mem_addr: bank=%d slot=%d\n", bank, slot);
3741 		if (ret != 0) {
3742 			MC_LOG("mc_get_mem_addr: dname_to_bankslot failed\n");
3743 			ret = ENODEV;
3744 		} else {
3745 			maddr.ma_bd = mcp->mc_board_num;
3746 			maddr.ma_bank =  bank;
3747 			maddr.ma_dimm_addr = offset;
3748 			ret = mcaddr_to_pa(mcp, &maddr, paddr);
3749 			if (ret != 0) {
3750 				MC_LOG("mc_get_mem_addr: "
3751 				    "mcaddr_to_pa failed\n");
3752 				ret = ENODEV;
3753 				mutex_exit(&mcp->mc_lock);
3754 				continue;
3755 			}
3756 			mutex_exit(&mcp->mc_lock);
3757 			break;
3758 		}
3759 		mutex_exit(&mcp->mc_lock);
3760 	}
3761 	mutex_exit(&mcmutex);
3762 	MC_LOG("mc_get_mem_addr: Ret=%d, Paddr=0x%lx\n", ret, *paddr);
3763 	return (ret);
3764 }
3765 
3766 static void
3767 mc_free_dimm_list(mc_dimm_info_t *d)
3768 {
3769 	mc_dimm_info_t *next;
3770 
3771 	while (d != NULL) {
3772 		next = d->md_next;
3773 		kmem_free(d, sizeof (mc_dimm_info_t));
3774 		d = next;
3775 	}
3776 }
3777 
3778 /*
3779  * mc_get_dimm_list -- get the list of dimms with serial-id info
3780  * from the SP.
3781  */
3782 mc_dimm_info_t *
3783 mc_get_dimm_list(mc_opl_t *mcp)
3784 {
3785 	uint32_t	bufsz;
3786 	uint32_t	maxbufsz;
3787 	int		ret;
3788 	int		sexp;
3789 	board_dimm_info_t *bd_dimmp;
3790 	mc_dimm_info_t	*dimm_list = NULL;
3791 
3792 	maxbufsz = bufsz = sizeof (board_dimm_info_t) +
3793 	    ((MCOPL_MAX_DIMMNAME +  MCOPL_MAX_SERIAL +
3794 	    MCOPL_MAX_PARTNUM) * OPL_MAX_DIMMS);
3795 
3796 	bd_dimmp = (board_dimm_info_t *)kmem_alloc(bufsz, KM_SLEEP);
3797 	ret = scf_get_dimminfo(mcp->mc_board_num, (void *)bd_dimmp, &bufsz);
3798 
3799 	MC_LOG("mc_get_dimm_list: scf_get_dimminfo returned=%d\n", ret);
3800 	if (ret == 0) {
3801 		sexp = sizeof (board_dimm_info_t) +
3802 		    ((bd_dimmp->bd_dnamesz +  bd_dimmp->bd_serialsz +
3803 		    bd_dimmp->bd_partnumsz) * bd_dimmp->bd_numdimms);
3804 
3805 		if ((bd_dimmp->bd_version == OPL_DIMM_INFO_VERSION) &&
3806 		    (bd_dimmp->bd_dnamesz <= MCOPL_MAX_DIMMNAME) &&
3807 		    (bd_dimmp->bd_serialsz <= MCOPL_MAX_SERIAL) &&
3808 		    (bd_dimmp->bd_partnumsz <= MCOPL_MAX_PARTNUM) &&
3809 		    (sexp <= bufsz)) {
3810 
3811 #ifdef DEBUG
3812 			if (oplmc_debug)
3813 				mc_dump_dimm_info(bd_dimmp);
3814 #endif
3815 			dimm_list = mc_prepare_dimmlist(bd_dimmp);
3816 
3817 		} else {
3818 			cmn_err(CE_WARN, "DIMM info version mismatch\n");
3819 		}
3820 	}
3821 	kmem_free(bd_dimmp, maxbufsz);
3822 	MC_LOG("mc_get_dimm_list: dimmlist=0x%p\n", (void *)dimm_list);
3823 	return (dimm_list);
3824 }
3825 
3826 /*
3827  * mc_prepare_dimmlist - Prepare the dimm list from the information
3828  * received from the SP.
3829  */
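/*
 * The SP buffer is laid out as a board_dimm_info_t header followed by
 * bd_numdimms back-to-back records, each consisting of a DIMM name,
 * serial number and part number of the fixed sizes advertised in the
 * header.  mc_prepare_dimmlist() walks those records, NUL-terminates
 * each copied string, and builds the mc_dimm_info_t list (in reverse
 * record order).
 */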
3830 mc_dimm_info_t *
3831 mc_prepare_dimmlist(board_dimm_info_t *bd_dimmp)
3832 {
3833 	char	*dimm_name;
3834 	char	*serial;
3835 	char	*part;
3836 	int	dimm;
3837 	int	dnamesz = bd_dimmp->bd_dnamesz;
3838 	int	sersz = bd_dimmp->bd_serialsz;
3839 	int	partsz = bd_dimmp->bd_partnumsz;
3840 	mc_dimm_info_t	*dimm_list = NULL;
3841 	mc_dimm_info_t	*d;
3842 
3843 	dimm_name = (char *)(bd_dimmp + 1);
3844 	for (dimm = 0; dimm < bd_dimmp->bd_numdimms; dimm++) {
3845 
3846 		d = (mc_dimm_info_t *)kmem_alloc(sizeof (mc_dimm_info_t),
3847 		    KM_SLEEP);
3848 
3849 		bcopy(dimm_name, d->md_dimmname, dnamesz);
3850 		d->md_dimmname[dnamesz] = 0;
3851 
3852 		serial = dimm_name + dnamesz;
3853 		bcopy(serial, d->md_serial, sersz);
3854 		d->md_serial[sersz] = 0;
3855 
3856 		part = serial + sersz;
3857 		bcopy(part, d->md_partnum, partsz);
3858 		d->md_partnum[partsz] = 0;
3859 
3860 		d->md_next = dimm_list;
3861 		dimm_list = d;
3862 		dimm_name = part + partsz;
3863 	}
3864 	return (dimm_list);
3865 }
3866 
3867 static int
3868 mc_get_mem_fmri(mc_flt_page_t *fpag, char **unum)
3869 {
3870 	if (fpag->fmri_addr == 0 || fpag->fmri_sz > MEM_FMRI_MAX_BUFSIZE)
3871 		return (EINVAL);
3872 
3873 	*unum = kmem_alloc(fpag->fmri_sz, KM_SLEEP);
3874 	if (copyin((void *)fpag->fmri_addr, *unum, fpag->fmri_sz) != 0) {
3875 		kmem_free(*unum, fpag->fmri_sz);
3876 		return (EFAULT);
3877 	}
3878 	return (0);
3879 }
3880 
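/*
 * mc_scf_log_event -- resolve the DIMM named by the fault page to its
 * serial-id and physical address and, if that address belongs to an
 * attached memory controller, queue an error log request to the SCF.
 */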
3881 static int
3882 mc_scf_log_event(mc_flt_page_t *flt_pag)
3883 {
3884 	mc_opl_t *mcp;
3885 	int board, bank, slot;
3886 	int len, rv = 0;
3887 	char *unum, *sid;
3888 	char dname[MCOPL_MAX_DIMMNAME + 1];
3889 	size_t sid_sz;
3890 	uint64_t pa;
3891 	mc_flt_stat_t flt_stat;
3892 
3893 	if ((sid_sz = cpu_get_name_bufsize()) == 0)
3894 		return (ENOTSUP);
3895 
3896 	if ((rv = mc_get_mem_fmri(flt_pag, &unum)) != 0) {
3897 		MC_LOG("mc_scf_log_event: mc_get_mem_fmri failed\n");
3898 		return (rv);
3899 	}
3900 
3901 	sid = kmem_zalloc(sid_sz, KM_SLEEP);
3902 
3903 	if ((rv = mc_get_mem_sid(unum, sid, sid_sz, &len)) != 0) {
3904 		MC_LOG("mc_scf_log_event: mc_get_mem_sid failed\n");
3905 		goto out;
3906 	}
3907 
3908 	if ((rv = mc_get_mem_addr(unum, sid, (uint64_t)flt_pag->err_add,
3909 	    &pa)) != 0) {
3910 		MC_LOG("mc_scf_log_event: mc_get_mem_addr failed\n");
3911 		goto out;
3912 	}
3913 
3914 	if (parse_unum_memory(unum, &board, dname) != 0) {
3915 		MC_LOG("mc_scf_log_event: parse_unum_memory failed\n");
3916 		rv = EINVAL;
3917 		goto out;
3918 	}
3919 
3920 	if (board < 0) {
3921 		MC_LOG("mc_scf_log_event: Invalid board=%d dimm=%s\n",
3922 		    board, dname);
3923 		rv = EINVAL;
3924 		goto out;
3925 	}
3926 
3927 	if (dname_to_bankslot(dname, &bank, &slot) != 0) {
3928 		MC_LOG("mc_scf_log_event: dname_to_bankslot failed\n");
3929 		rv = EINVAL;
3930 		goto out;
3931 	}
3932 
3933 	mutex_enter(&mcmutex);
3934 
3935 	flt_stat.mf_err_add = flt_pag->err_add;
3936 	flt_stat.mf_err_log = flt_pag->err_log;
3937 	flt_stat.mf_flt_paddr = pa;
3938 
3939 	if ((mcp = mc_pa_to_mcp(pa)) == NULL) {
3940 		mutex_exit(&mcmutex);
3941 		MC_LOG("mc_scf_log_event: invalid pa\n");
3942 		rv = EINVAL;
3943 		goto out;
3944 	}
3945 
3946 	MC_LOG("mc_scf_log_event: DIMM%s, /LSB%d/B%d/%x, pa %lx elog %x\n",
3947 	    unum, mcp->mc_board_num, bank, flt_pag->err_add, pa,
3948 	    flt_pag->err_log);
3949 
3950 	mutex_enter(&mcp->mc_lock);
3951 
3952 	if (!pa_is_valid(mcp, pa)) {
3953 		mutex_exit(&mcp->mc_lock);
3954 		mutex_exit(&mcmutex);
3955 		rv = EINVAL;
3956 		goto out;
3957 	}
3958 
3959 	rv = 0;
3960 
3961 	mc_queue_scf_log(mcp, &flt_stat, bank);
3962 
3963 	mutex_exit(&mcp->mc_lock);
3964 	mutex_exit(&mcmutex);
3965 
3966 out:
3967 	kmem_free(unum, flt_pag->fmri_sz);
3968 	kmem_free(sid, sid_sz);
3969 
3970 	return (rv);
3971 }
3972 
3973 #ifdef DEBUG
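/*
 * mc_dump_dimm/mc_dump_dimm_info -- DEBUG-only helpers that print the
 * packed DIMM records returned by the SP in human-readable form.
 */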
3974 void
3975 mc_dump_dimm(char *buf, int dnamesz, int serialsz, int partnumsz)
3976 {
3977 	char dname[MCOPL_MAX_DIMMNAME + 1];
3978 	char serial[MCOPL_MAX_SERIAL + 1];
3979 	char part[MCOPL_MAX_PARTNUM + 1];
3980 	char *b;
3981 
3982 	b = buf;
3983 	bcopy(b, dname, dnamesz);
3984 	dname[dnamesz] = 0;
3985 
3986 	b += dnamesz;
3987 	bcopy(b, serial, serialsz);
3988 	serial[serialsz] = 0;
3989 
3990 	b += serialsz;
3991 	bcopy(b, part, partnumsz);
3992 	part[partnumsz] = 0;
3993 
3994 	printf("DIMM=%s  Serial=%s PartNum=%s\n", dname, serial, part);
3995 }
3996 
3997 void
3998 mc_dump_dimm_info(board_dimm_info_t *bd_dimmp)
3999 {
4000 	int	dimm;
4001 	int	dnamesz = bd_dimmp->bd_dnamesz;
4002 	int	sersz = bd_dimmp->bd_serialsz;
4003 	int	partsz = bd_dimmp->bd_partnumsz;
4004 	char	*buf;
4005 
4006 	printf("Version=%d Board=%02d DIMMs=%d NameSize=%d "
4007 	    "SerialSize=%d PartnumSize=%d\n", bd_dimmp->bd_version,
4008 	    bd_dimmp->bd_boardnum, bd_dimmp->bd_numdimms, bd_dimmp->bd_dnamesz,
4009 	    bd_dimmp->bd_serialsz, bd_dimmp->bd_partnumsz);
4010 	printf("======================================================\n");
4011 
4012 	buf = (char *)(bd_dimmp + 1);
4013 	for (dimm = 0; dimm < bd_dimmp->bd_numdimms; dimm++) {
4014 		mc_dump_dimm(buf, dnamesz, sersz, partsz);
4015 		buf += dnamesz + sersz + partsz;
4016 	}
4017 	printf("======================================================\n");
4018 }
4019 
4020 
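/*
 * mc_ioctl_debug -- DEBUG-only error injection entry point.  The low four
 * bits of cmd select the MCI_* operation and the remaining bits are passed
 * to mc_inject_error() as flags.  If arg is non-NULL the target physical
 * address is copied in from the caller; otherwise a scratch page is
 * allocated (and freed on return) and successive calls step through it in
 * 64-byte increments.
 */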
4021 /* ARGSUSED */
4022 static int
4023 mc_ioctl_debug(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
4024     int *rvalp)
4025 {
4026 	caddr_t	buf, kbuf;
4027 	uint64_t pa;
4028 	int rv = 0;
4029 	int i;
4030 	uint32_t flags;
4031 	static uint32_t offset = 0;
4032 
4033 
4034 	flags = (cmd >> 4) & 0xfffffff;
4035 
4036 	cmd &= 0xf;
4037 
4038 	MC_LOG("mc_ioctl_debug(cmd = %x, flags = %x)\n", cmd, flags);
4039 
4040 	if (arg != (intptr_t)NULL) {
4041 		if (ddi_copyin((const void *)arg, (void *)&pa,
4042 		    sizeof (uint64_t), mode) != 0) {
4043 			rv = EFAULT;
4044 			return (rv);
4045 		}
4046 		buf = NULL;
4047 	} else {
4048 		buf = (caddr_t)kmem_alloc(PAGESIZE, KM_SLEEP);
4049 
4050 		pa = va_to_pa(buf);
4051 		pa += offset;
4052 
4053 		offset += 64;
4054 		if (offset >= PAGESIZE)
4055 			offset = 0;
4056 	}
4057 
4058 	switch (cmd) {
4059 	case MCI_CE:
4060 		(void) mc_inject_error(MC_INJECT_INTERMITTENT_CE, pa, flags);
4061 		break;
4062 	case MCI_PERM_CE:
4063 		(void) mc_inject_error(MC_INJECT_PERMANENT_CE, pa, flags);
4064 		break;
4065 	case MCI_UE:
4066 		(void) mc_inject_error(MC_INJECT_UE, pa, flags);
4067 		break;
4068 	case MCI_M_CE:
4069 		(void) mc_inject_error(MC_INJECT_INTERMITTENT_MCE, pa, flags);
4070 		break;
4071 	case MCI_M_PCE:
4072 		(void) mc_inject_error(MC_INJECT_PERMANENT_MCE, pa, flags);
4073 		break;
4074 	case MCI_M_UE:
4075 		(void) mc_inject_error(MC_INJECT_MUE, pa, flags);
4076 		break;
4077 	case MCI_CMP:
4078 		(void) mc_inject_error(MC_INJECT_CMPE, pa, flags);
4079 		break;
4080 	case MCI_NOP:
4081 		(void) mc_inject_error(MC_INJECT_NOP, pa, flags);
		break;
4082 	case MCI_SHOW_ALL:
4083 		mc_debug_show_all = 1;
4084 		break;
4085 	case MCI_SHOW_NONE:
4086 		mc_debug_show_all = 0;
4087 		break;
4088 	case MCI_ALLOC:
4089 		/*
4090 		 * Just allocate some kernel memory and never free it;
4091 		 * 512 MB seems to be the largest single allocation supported.
4092 		 */
4093 		cmn_err(CE_NOTE, "Allocating kmem %d MB\n", flags * 512);
4094 		for (i = 0; i < flags; i++) {
4095 			kbuf = kmem_alloc(512 * 1024 * 1024, KM_SLEEP);
4096 			cmn_err(CE_NOTE, "kmem buf %llx PA %llx\n",
4097 			    (u_longlong_t)kbuf, (u_longlong_t)va_to_pa(kbuf));
4098 		}
4099 		break;
4100 	case MCI_SUSPEND:
4101 		(void) opl_mc_suspend();
4102 		break;
4103 	case MCI_RESUME:
4104 		(void) opl_mc_resume();
4105 		break;
4106 	default:
4107 		rv = ENXIO;
4108 	}
4109 	if (buf)
4110 		kmem_free(buf, PAGESIZE);
4111 
4112 	return (rv);
4113 }
4114 
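/*
 * Illustrative only (hypothetical caller, not part of the driver): assuming
 * the driver's ioctl path hands the raw cmd word to mc_ioctl_debug(), a
 * debug request would encode the MCI_* operation in bits 0-3 of cmd and the
 * injection flags in the bits above it, with arg pointing at the target
 * physical address, e.g.
 *
 *	uint64_t pa = target_pa;		(chosen by the tester)
 *	int cmd = MCI_UE | (inject_flags << 4);
 *
 *	(void) ioctl(fd, cmd, &pa);
 */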
4115 #endif /* DEBUG */
4116