xref: /titanic_50/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c (revision 711890bc9379ceea66272dc8d4981812224ea86e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * memory management for serengeti dr memory
31  */
32 
33 #include <sys/obpdefs.h>
34 #include <sys/types.h>
35 #include <sys/conf.h>
36 #include <sys/ddi.h>
37 #include <sys/cpuvar.h>
38 #include <sys/memlist_impl.h>
39 #include <sys/machsystm.h>
40 #include <sys/promif.h>
41 #include <sys/mem_cage.h>
42 #include <sys/kmem.h>
43 #include <sys/note.h>
44 #include <sys/lgrp.h>
45 
46 #include <sys/sbd_ioctl.h>
47 #include <sys/sbd.h>
48 #include <sys/sbdp_priv.h>
49 #include <sys/sbdp_mem.h>
50 #include <sys/sun4asi.h>
51 #include <sys/cheetahregs.h>
52 #include <sys/cpu_module.h>
53 #include <sys/esunddi.h>
54 
55 #include <vm/page.h>
56 
57 static int	sbdp_get_meminfo(pnode_t, int, uint64_t *, uint64_t *);
58 int		mc_read_regs(pnode_t, mc_regs_t *);
59 uint64_t	mc_get_addr(pnode_t, int, uint_t *);
60 static pnode_t	mc_get_sibling_cpu(pnode_t nodeid);
61 static int	mc_get_sibling_cpu_impl(pnode_t nodeid);
62 static sbd_cond_t mc_check_sibling_cpu(pnode_t nodeid);
63 static void	_sbdp_copy_rename_end(void);
64 static int	sbdp_copy_rename__relocatable(sbdp_cr_handle_t *,
65 			struct memlist *, sbdp_rename_script_t *);
66 static int	sbdp_prep_rename_script(sbdp_cr_handle_t *);
67 static int	sbdp_get_lowest_addr_in_node(pnode_t, uint64_t *);
68 
69 extern void bcopy32_il(uint64_t, uint64_t);
70 extern void flush_ecache_il(uint64_t physaddr, size_t size, size_t linesize);
71 extern uint64_t lddphys_il(uint64_t physaddr);
72 extern uint64_t ldxasi_il(uint64_t physaddr, uint_t asi);
73 extern void sbdp_exec_script_il(sbdp_rename_script_t *rsp);
74 void sbdp_fill_bank_info(uint64_t, sbdp_bank_t **);
75 int sbdp_add_nodes_banks(pnode_t node, sbdp_bank_t **banks);
76 void sbdp_add_bank_to_seg(sbdp_bank_t *);
77 void sbdp_remove_bank_from_seg(sbdp_bank_t *);
78 uint64_t sbdp_determine_slice(sbdp_handle_t *);
79 sbdp_seg_t *sbdp_get_seg(uint64_t);
80 #ifdef DEBUG
81 void sbdp_print_seg(sbdp_seg_t *);
82 #endif
83 
84 /*
85  * Head of the system segments linked list
86  */
87 sbdp_seg_t *sys_seg = NULL;
88 
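/*
 * Return the slice size that covers the amount of memory currently on the
 * board (per the SG_SLICE_* constants: nominally 16GB, 32GB or 64GB).
 */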
89 uint64_t
90 sbdp_determine_slice(sbdp_handle_t *hp)
91 {
92 	int size;
93 
94 	size = sbdp_get_mem_size(hp);
95 
96 	if (size <= SG_SLICE_16G_SIZE) {
97 		return (SG_SLICE_16G_SIZE);
98 	} else if (size <= SG_SLICE_32G_SIZE) {
99 		return (SG_SLICE_32G_SIZE);
100 	} else {
101 		return (SG_SLICE_64G_SIZE);
102 	}
103 }
104 
105 /* ARGSUSED */
106 int
107 sbdp_get_mem_alignment(sbdp_handle_t *hp, dev_info_t *dip, uint64_t *align)
108 {
109 	*align = sbdp_determine_slice(hp);
110 	return (0);
111 }
112 
113 
114 void
115 sbdp_memlist_dump(struct memlist *mlist)
116 {
117 	register struct memlist *ml;
118 
119 	if (mlist == NULL) {
120 		SBDP_DBG_MEM("memlist> EMPTY\n");
121 	} else {
122 		for (ml = mlist; ml; ml = ml->next)
123 			SBDP_DBG_MEM("memlist>  0x%" PRIx64", 0x%" PRIx64"\n",
124 			    ml->address, ml->size);
125 	}
126 }
127 
128 struct mem_arg {
129 	int	board;
130 	int	ndips;
131 	dev_info_t **list;
132 };
133 
134 /*
135  * Returns the mem dip held; the caller must release it with ddi_release_devi()
136  */
137 static int
138 sbdp_get_mem_dip(pnode_t node, void *arg, uint_t flags)
139 {
140 	_NOTE(ARGUNUSED(flags))
141 
142 	dev_info_t *dip;
143 	pnode_t nodeid;
144 	mem_op_t mem = {0};
145 	struct mem_arg *ap = arg;
146 
147 	if (node == OBP_BADNODE || node == OBP_NONODE)
148 		return (DDI_FAILURE);
149 
150 	mem.nodes = &nodeid;
151 	mem.board = ap->board;
152 	mem.nmem = 0;
153 
154 	(void) sbdp_is_mem(node, &mem);
155 
156 	ASSERT(mem.nmem == 0 || mem.nmem == 1);
157 
158 	if (mem.nmem == 0 || nodeid != node)
159 		return (DDI_FAILURE);
160 
161 	dip = e_ddi_nodeid_to_dip(nodeid);
162 	if (dip) {
163 		ASSERT(ap->ndips < SBDP_MAX_MEM_NODES_PER_BOARD);
164 		ap->list[ap->ndips++] = dip;
165 	}
166 	return (DDI_SUCCESS);
167 }
168 
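/*
 * Build a memlist describing the memory currently decoded by the memory
 * controllers on the handle's board.  The caller frees the list with
 * sbdp_del_memlist().
 */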
169 struct memlist *
170 sbdp_get_memlist(sbdp_handle_t *hp, dev_info_t *dip)
171 {
172 	_NOTE(ARGUNUSED(dip))
173 
174 	int i, j, skip = 0;
175 	dev_info_t	*list[SBDP_MAX_MEM_NODES_PER_BOARD];
176 	struct mem_arg	arg = {0};
177 	uint64_t	base_pa, size;
178 	struct memlist	*mlist = NULL;
179 
180 	list[0] = NULL;
181 	arg.board = hp->h_board;
182 	arg.list = list;
183 
184 	sbdp_walk_prom_tree(prom_rootnode(), sbdp_get_mem_dip, &arg);
185 
186 	for (i = 0; i < arg.ndips; i++) {
187 		if (list[i] == NULL)
188 			continue;
189 
190 		size = 0;
191 		for (j = 0; j < SBDP_MAX_MCS_PER_NODE; j++) {
192 			if (sbdp_get_meminfo(ddi_get_nodeid(list[i]), j,
193 			    &size, &base_pa)) {
194 				skip++;
195 				continue;
196 			}
197 			if (size == -1 || size == 0)
198 				continue;
199 
200 			(void) memlist_add_span(base_pa, size, &mlist);
201 		}
202 
203 		/*
204 		 * Release hold acquired in sbdp_get_mem_dip()
205 		 */
206 		ddi_release_devi(list[i]);
207 	}
208 
209 	/*
210 	 * XXX - The following two lines are from existing code.
211 	 * However, this appears to be incorrect - this check should be
212  * made for each dip in the list, i.e. within the for(i) loop.
213 	 */
214 	if (skip == SBDP_MAX_MCS_PER_NODE)
215 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
216 
217 	SBDP_DBG_MEM("memlist for board %d\n", hp->h_board);
218 	sbdp_memlist_dump(mlist);
219 	return (mlist);
220 }
221 
222 struct memlist *
223 sbdp_memlist_dup(struct memlist *mlist)
224 {
225 	struct memlist *hl, *prev;
226 
227 	if (mlist == NULL)
228 		return (NULL);
229 
230 	prev = NULL;
231 	hl = NULL;
232 	for (; mlist; mlist = mlist->next) {
233 		struct memlist *mp;
234 
235 		mp = memlist_get_one();
236 		if (mp == NULL) {
237 			if (hl != NULL)
238 				memlist_free_list(hl);
239 			hl = NULL;
240 			break;
241 		}
242 		mp->address = mlist->address;
243 		mp->size = mlist->size;
244 		mp->next = NULL;
245 		mp->prev = prev;
246 
247 		if (prev == NULL)
248 			hl = mp;
249 		else
250 			prev->next = mp;
251 		prev = mp;
252 	}
253 
254 	return (hl);
255 }
256 
257 int
258 sbdp_del_memlist(sbdp_handle_t *hp, struct memlist *mlist)
259 {
260 	_NOTE(ARGUNUSED(hp))
261 
262 	memlist_free_list(mlist);
263 
264 	return (0);
265 }
266 
267 /*ARGSUSED*/
268 static void
269 sbdp_flush_ecache(uint64_t a, uint64_t b)
270 {
271 	cpu_flush_ecache();
272 }
273 
274 typedef enum {
275 	SBDP_CR_OK,
276 	SBDP_CR_MC_IDLE_ERR
277 } sbdp_cr_err_t;
278 
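/*
 * Copy-rename: copy the source board's memory contents to the target board
 * while the OS is quiesced, then rewrite the MC decode registers on both
 * boards so that the target takes over the source board's physical address
 * range (and the source assumes the target's).
 */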
279 int
280 sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
281 {
282 	sbdp_bd_t	*s_bdp, *t_bdp;
283 	int		err = 0;
284 	caddr_t		mempage;
285 	ulong_t		data_area, index_area;
286 	ulong_t		e_area, e_page;
287 	int		availlen, indexlen, funclen, scriptlen;
288 	int		*indexp;
289 	time_t		copytime;
290 	int		(*funcp)();
291 	size_t		size;
292 	struct memlist	*mlist;
293 	sbdp_sr_handle_t	*srhp;
294 	sbdp_rename_script_t	*rsp;
295 	sbdp_rename_script_t	*rsbuffer;
296 	sbdp_cr_handle_t	*cph;
297 	int		linesize;
298 	uint64_t	neer;
299 	sbdp_cr_err_t	cr_err;
300 
301 	cph =  kmem_zalloc(sizeof (sbdp_cr_handle_t), KM_SLEEP);
302 
303 	SBDP_DBG_MEM("moving memory from memory board %d to board %d\n",
304 	    hp->h_board, t_bd);
305 
306 	s_bdp = sbdp_get_bd_info(hp->h_wnode, hp->h_board);
307 	t_bdp = sbdp_get_bd_info(hp->h_wnode, t_bd);
308 
309 	if ((s_bdp == NULL) || (t_bdp == NULL)) {
310 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
311 		return (-1);
312 	}
313 
314 	funclen = (int)((ulong_t)_sbdp_copy_rename_end -
315 			(ulong_t)sbdp_copy_rename__relocatable);
316 
317 	if (funclen > PAGESIZE) {
318 		cmn_err(CE_WARN,
319 		    "sbdp: copy-rename funclen (%d) > PAGESIZE (%d)",
320 		    funclen, PAGESIZE);
321 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
322 		return (-1);
323 	}
324 
325 	/*
326 	 * mempage will be page aligned, since we're calling
327 	 * kmem_alloc() with an exact multiple of PAGESIZE.
328 	 */
329 	mempage = kmem_alloc(PAGESIZE, KM_SLEEP);
330 
331 	SBDP_DBG_MEM("mempage = 0x%p\n", mempage);
332 
333 	/*
334 	 * Copy the code for the copy-rename routine into
335 	 * a page aligned piece of memory.  We do this to guarantee
336 	 * that we're executing within the same page and thus reduce
337 	 * the possibility of cache collisions between different
338 	 * pages.
339 	 */
340 	bcopy((caddr_t)sbdp_copy_rename__relocatable, mempage, funclen);
341 
342 	funcp = (int (*)())mempage;
343 
344 	SBDP_DBG_MEM("copy-rename funcp = 0x%p (len = 0x%x)\n", funcp, funclen);
345 
346 	/*
347 	 * Prepare data page that will contain script of
348 	 * operations to perform during copy-rename.
349 	 * Allocate temporary buffer to hold script.
350 	 */
351 
352 	size = sizeof (sbdp_rename_script_t) * SBDP_RENAME_MAXOP;
353 	rsbuffer = kmem_zalloc(size, KM_SLEEP);
354 
355 	cph->s_bdp = s_bdp;
356 	cph->t_bdp = t_bdp;
357 	cph->script = rsbuffer;
358 
359 	/*
360 	 * We must not switch cpus here: the rename script being built is
361 	 * specific to the cpu that will later execute the copy-rename
362 	 */
363 	affinity_set(CPU_CURRENT);
364 	scriptlen = sbdp_prep_rename_script(cph);
365 	if (scriptlen <= 0) {
366 		cmn_err(CE_WARN,
367 			"sbdp failed to prep for copy-rename");
368 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
369 		err = 1;
370 		goto cleanup;
371 	}
372 	SBDP_DBG_MEM("copy-rename script length = 0x%x\n", scriptlen);
373 
374 	indexlen = sizeof (*indexp) << 1;
375 
376 	if ((funclen + scriptlen + indexlen) > PAGESIZE) {
377 		cmn_err(CE_WARN,
378 			"sbdp: func len (%d) + script len (%d) "
379 			"+ index len (%d) > PAGESIZE (%d)",
380 			funclen, scriptlen, indexlen, PAGESIZE);
381 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
382 		err = 1;
383 		goto cleanup;
384 	}
385 
386 	linesize = cpunodes[CPU->cpu_id].ecache_linesize;
387 
388 	/*
389 	 * Find aligned area within data page to maintain script.
390 	 */
391 	data_area = (ulong_t)mempage;
392 	data_area += (ulong_t)funclen + (ulong_t)(linesize - 1);
393 	data_area &= ~((ulong_t)(linesize - 1));
394 
395 	availlen = PAGESIZE - indexlen;
396 	availlen -= (int)(data_area - (ulong_t)mempage);
397 
398 	if (availlen < scriptlen) {
399 		cmn_err(CE_WARN,
400 			"sbdp: available len (%d) < script len (%d)",
401 			availlen, scriptlen);
402 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
403 		err = 1;
404 		goto cleanup;
405 	}
406 
407 	SBDP_DBG_MEM("copy-rename script data area = 0x%lx\n",
408 		data_area);
409 
410 	bcopy((caddr_t)rsbuffer, (caddr_t)data_area, scriptlen);
411 	rsp = (sbdp_rename_script_t *)data_area;
412 
413 	index_area = data_area + (ulong_t)scriptlen +
414 			(ulong_t)(linesize - 1);
415 	index_area &= ~((ulong_t)(linesize - 1));
416 	indexp = (int *)index_area;
417 	indexp[0] = 0;
418 	indexp[1] = 0;
419 
420 	e_area = index_area + (ulong_t)indexlen;
421 	e_page = (ulong_t)mempage + PAGESIZE;
422 	if (e_area > e_page) {
423 		cmn_err(CE_WARN,
424 			"sbdp: index area size (%d) > available (%d)\n",
425 			indexlen, (int)(e_page - index_area));
426 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
427 		err = 1;
428 		goto cleanup;
429 	}
430 
431 	SBDP_DBG_MEM("copy-rename index area = 0x%p\n", indexp);
432 
433 	SBDP_DBG_MEM("cpu %d\n", CPU->cpu_id);
434 
435 	srhp = sbdp_get_sr_handle();
436 	ASSERT(srhp);
437 
438 	srhp->sr_flags = hp->h_flags;
439 
440 	copytime = lbolt;
441 
442 	mutex_enter(&s_bdp->bd_mutex);
443 	mlist = sbdp_memlist_dup(s_bdp->ml);
444 	mutex_exit(&s_bdp->bd_mutex);
445 
446 	if (mlist == NULL) {
447 		SBDP_DBG_MEM("Didn't find memory list\n");
448 	}
449 	SBDP_DBG_MEM("src\n\tbd\t%d\n\tnode\t%d\n\tbpa 0x%lx\n\tnodes\t%p\n",
450 	    s_bdp->bd, s_bdp->wnode, s_bdp->bpa, s_bdp->nodes);
451 	sbdp_memlist_dump(s_bdp->ml);
452 	SBDP_DBG_MEM("tgt\n\tbd\t%d\n\tnode\t%d\n\tbpa 0x%lx\n\tnodes\t%p\n",
453 	    t_bdp->bd, t_bdp->wnode, t_bdp->bpa, t_bdp->nodes);
454 	sbdp_memlist_dump(t_bdp->ml);
455 
456 	/*
457 	 * Quiesce the OS.
458 	 */
459 	if (sbdp_suspend(srhp)) {
460 		sbd_error_t	*sep;
461 		cmn_err(CE_WARN,
462 			"sbdp: failed to quiesce OS for copy-rename");
463 		sep = &srhp->sep;
464 		sbdp_set_err(hp->h_err, sep->e_code, sep->e_rsc);
465 		sbdp_release_sr_handle(srhp);
466 		sbdp_del_memlist(hp, mlist);
467 		err = 1;
468 		goto cleanup;
469 	}
470 
471 	/*
472 	 * =================================
473 	 * COPY-RENAME BEGIN.
474 	 * =================================
475 	 */
476 	SBDP_DBG_MEM("s_base 0x%lx t_base 0x%lx\n", cph->s_bdp->bpa,
477 	    cph->t_bdp->bpa);
478 
479 	cph->ret = 0;
480 
481 	SBDP_DBG_MEM("cph return 0x%lx\n", cph->ret);
482 
483 	SBDP_DBG_MEM("Flushing all of the cpu caches\n");
484 	xc_all(sbdp_flush_ecache, 0, 0);
485 
486 	/* disable CE reporting */
487 	neer = get_error_enable();
488 	set_error_enable(neer & ~EN_REG_CEEN);
489 
490 	cr_err = (*funcp)(cph, mlist, rsp);
491 
492 	/* enable CE reporting */
493 	set_error_enable(neer);
494 
495 	SBDP_DBG_MEM("s_base 0x%lx t_base 0x%lx\n", cph->s_bdp->bpa,
496 	    cph->t_bdp->bpa);
497 	SBDP_DBG_MEM("cph return 0x%lx\n", cph->ret);
498 	SBDP_DBG_MEM("after executing the function\n");
499 
500 	/*
501 	 * =================================
502 	 * COPY-RENAME END.
503 	 * =================================
504 	 */
505 	SBDP_DBG_MEM("err is %d\n", err);
506 
507 	/*
508 	 * Resume the OS.
509 	 */
510 	sbdp_resume(srhp);
511 	if (srhp->sep.e_code) {
512 		sbd_error_t	*sep;
513 		cmn_err(CE_WARN,
514 		    "sbdp: failed to resume OS for copy-rename");
515 		sep = &srhp->sep;
516 		sbdp_set_err(hp->h_err, sep->e_code, sep->e_rsc);
517 		err = 1;
518 	}
519 
520 	copytime = lbolt - copytime;
521 
522 	sbdp_release_sr_handle(srhp);
523 	sbdp_del_memlist(hp, mlist);
524 
525 	SBDP_DBG_MEM("copy-rename elapsed time = %ld ticks (%ld secs)\n",
526 		copytime, copytime / hz);
527 
528 	switch (cr_err) {
529 	case SBDP_CR_OK:
530 		break;
531 	case SBDP_CR_MC_IDLE_ERR: {
532 		dev_info_t *dip;
533 		pnode_t nodeid = cph->busy_mc->node;
534 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
535 
536 		dip = e_ddi_nodeid_to_dip(nodeid);
537 
538 		ASSERT(dip != NULL);
539 
540 		(void) ddi_pathname(dip, path);
541 		ddi_release_devi(dip);
542 		cmn_err(CE_WARN, "failed to idle memory controller %s: "
543 		    "copy-rename aborted", path);
544 		kmem_free(path, MAXPATHLEN);
545 		sbdp_set_err(hp->h_err, ESBD_MEMFAIL, NULL);
546 		err = 1;
547 		break;
548 	}
549 	default:
550 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
551 		cmn_err(CE_WARN, "unknown copy-rename error code (%d)", cr_err);
552 		err = 1;
553 		break;
554 	}
555 
556 	if (err)
557 		goto cleanup;
558 
559 	/*
560 	 * Rename memory for lgroup.
561 	 * Source and target board numbers are packaged in arg.
562 	 */
563 	lgrp_plat_config(LGRP_CONFIG_MEM_RENAME,
564 		(uintptr_t)(s_bdp->bd | (t_bdp->bd << 16)));
565 
566 	/*
567 	 * swap list of banks
568 	 */
569 	sbdp_swap_list_of_banks(s_bdp, t_bdp);
570 
571 	/*
572 	 * Update the cached board info for both the source and the target
573 	 */
574 	sbdp_update_bd_info(s_bdp);
575 	sbdp_update_bd_info(t_bdp);
576 
577 	/*
578 	 * Tell the sc that we have swapped slices.
579 	 */
580 	if (sbdp_swap_slices(s_bdp->bd, t_bdp->bd) != 0) {
581 		/* This is dangerous. The in use slice could be re-used! */
582 		SBDP_DBG_MEM("swapping slices failed\n");
583 	}
584 
585 cleanup:
586 	kmem_free(rsbuffer, size);
587 	kmem_free(mempage, PAGESIZE);
588 	kmem_free(cph, sizeof (sbdp_cr_handle_t));
589 	affinity_clear();
590 
591 	return (err ? -1 : 0);
592 }
593 
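/*
 * For each valid bank behind `node', append two entries to the rename
 * script: the first rewrites the bank's MC decode register with the valid
 * bit cleared, the second rewrites it with the valid bit set.  The new UM
 * field is taken from new_base; unless `inval' is set, the bank's offset
 * from `bpa' is preserved relative to the new base.
 */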
594 static int
595 sbdp_copy_regs(pnode_t node, uint64_t bpa, uint64_t new_base, int inval,
596 	sbdp_rename_script_t *rsp, int *index)
597 {
598 	int		i, m;
599 	mc_regs_t	regs;
600 	uint64_t	*mc_decode;
601 
602 	if (mc_read_regs(node, &regs)) {
603 		SBDP_DBG_MEM("sbdp_copy_regs: failed to read source Decode "
604 		    "Regs");
605 		return (-1);
606 	}
607 
608 	mc_decode = regs.mc_decode;
609 
610 	m = *index;
611 	for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
612 		uint64_t	offset, seg_pa, tmp_base;
613 
614 		/*
615 		 * Skip invalid banks
616 		 */
617 		if ((mc_decode[i] & SG_DECODE_VALID) != SG_DECODE_VALID) {
618 			continue;
619 		}
620 
621 		tmp_base = new_base;
622 		if (!inval) {
623 			/*
624 			 * We need to calculate the offset from the base pa
625 			 * to add it appropriately to the new_base.
626 			 * The offset needs to be in UM relative to the mc
627 			 * decode register.  Since we are going from physical
628 			 * address to UM, we need to shift it by PHYS2UM_SHIFT.
629 			 * To get it ready to OR it with the MC decode reg,
630 			 * we need to shift it left MC_UM_SHIFT
631 			 */
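			/*
			 * Net effect: the bank keeps its offset within the
			 * board, i.e. new bank PA = PA(new_base) +
			 * (old bank PA - bpa), re-encoded in UM form below.
			 */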
632 			seg_pa = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
633 			offset = (seg_pa - bpa);
634 			/* Convert tmp_base into a physical address */
635 			tmp_base = (tmp_base >> MC_UM_SHIFT) << PHYS2UM_SHIFT;
636 			tmp_base += offset;
637 			/* Convert tmp_base to be MC reg ready */
638 			tmp_base = (tmp_base >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
639 		}
640 
641 		mc_decode[i] &= ~SG_DECODE_UM;
642 		mc_decode[i] |= tmp_base;
643 		mc_decode[i] |= SG_DECODE_VALID;
644 
645 		/*
646 		 * Step 1:	Write source base address to the MC
647 		 *		with present bit off.
648 		 */
649 		rsp[m].masr_addr = mc_get_addr(node, i, &rsp[m].asi);
650 		rsp[m].masr = mc_decode[i] & ~SG_DECODE_VALID;
651 		m++;
652 		/*
653 		 * Step 2:	Now rewrite the mc reg with present bit on.
654 		 */
655 		rsp[m].masr_addr = rsp[m-1].masr_addr;
656 		rsp[m].masr = mc_decode[i];
657 		rsp[m].asi = rsp[m-1].asi;
658 		m++;
659 	}
660 
661 	*index = m;
662 	return (0);
663 }
664 
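/*
 * Return, in *pa, the physical address encoded in the mc node's "reg"
 * property.
 */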
665 static int
666 sbdp_get_reg_addr(pnode_t nodeid, uint64_t *pa)
667 {
668 	mc_regspace	reg;
669 	int		len;
670 
671 	len = prom_getproplen(nodeid, "reg");
672 	if (len != sizeof (mc_regspace))
673 		return (-1);
674 
675 	if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
676 		return (-1);
677 
678 	ASSERT(pa != NULL);
679 
680 	*pa = ((uint64_t)reg.regspec_addr_hi) << 32;
681 	*pa |= (uint64_t)reg.regspec_addr_lo;
682 
683 	return (0);
684 }
685 
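/*
 * Return the cpu implementation number of the cpu (or CMP) that shares a
 * portid with the given mc node, or -1 on failure.
 */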
686 static int
687 mc_get_sibling_cpu_impl(pnode_t mc_node)
688 {
689 	int	len, impl;
690 	pnode_t	cpu_node;
691 	char	namebuf[OBP_MAXPROPNAME];
692 
693 	cpu_node = mc_get_sibling_cpu(mc_node);
694 	if (cpu_node == OBP_NONODE) {
695 		SBDP_DBG_MEM("mc_get_sibling_cpu failed: dnode=0x%x\n",
696 		    mc_node);
697 		return (-1);
698 	}
699 
700 	len = prom_getproplen(cpu_node, "name");
701 	if (len < 0) {
702 		SBDP_DBG_MEM("invalid prom_getproplen for name prop: "
703 		    "len=%d, dnode=0x%x\n", len, cpu_node);
704 		return (-1);
705 	}
706 
707 	if (prom_getprop(cpu_node, "name", (caddr_t)namebuf) == -1) {
708 		SBDP_DBG_MEM("failed to read name property for dnode=0x%x\n",
709 		    cpu_node);
710 		return (-1);
711 	}
712 
713 	/*
714 	 * If this is a CMP node, the child has the implementation
715 	 * property.
716 	 */
717 	if (strcmp(namebuf, "cmp") == 0) {
718 		cpu_node = prom_childnode(cpu_node);
719 		ASSERT(cpu_node != OBP_NONODE);
720 	}
721 
722 	if (prom_getprop(cpu_node, "implementation#", (caddr_t)&impl) == -1) {
723 		SBDP_DBG_MEM("failed to read implementation# property for "
724 		    "dnode=0x%x\n", cpu_node);
725 		return (-1);
726 	}
727 
728 	SBDP_DBG_MEM("mc_get_sibling_cpu_impl: found impl=0x%x, dnode=0x%x\n",
729 	    impl, cpu_node);
730 
731 	return (impl);
732 }
733 
734 /*
735  * Provide EMU Activity Status register ASI and address.  Only valid for
736  * Panther processors.
737  */
738 static int
739 mc_get_idle_reg(pnode_t nodeid, uint64_t *addr, uint_t *asi)
740 {
741 	int	portid;
742 	uint64_t reg_pa;
743 
744 	ASSERT(nodeid != OBP_NONODE);
745 	ASSERT(mc_get_sibling_cpu_impl(nodeid) == PANTHER_IMPL);
746 
747 	if (prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0 ||
748 	    portid == -1) {
749 		SBDP_DBG_MEM("mc_get_idle_reg: failed to read portid prop "
750 		    "for dnode=0x%x\n", nodeid);
751 		return (-1);
752 	}
753 
754 	if (sbdp_get_reg_addr(nodeid, &reg_pa) != 0) {
755 		SBDP_DBG_MEM("mc_get_idle_reg: failed to read reg prop "
756 		    "for dnode=0x%x\n", nodeid);
757 		return (-1);
758 	}
759 
760 	/*
761 	 * Local access will be via ASI 0x4a, otherwise via Safari PIO.
762 	 * This assumes the copy-rename will later run on the same proc,
763 	 * so we must already be bound to it.
764 	 */
765 	ASSERT(curthread->t_bound_cpu == CPU);
766 	if (SG_CPUID_TO_PORTID(CPU->cpu_id) == portid) {
767 		*addr = ASI_EMU_ACT_STATUS_VA;
768 		*asi = ASI_SAFARI_CONFIG;
769 	} else {
770 		*addr = MC_ACTIVITY_STATUS(reg_pa);
771 		*asi = ASI_IO;
772 	}
773 
774 	return (0);
775 }
776 
777 /*
778  * If non-Panther board, add phys_banks entry for each physical bank.
779  * If Panther board, add mc_idle_regs entry for each EMU Activity Status
780  * register.  Increment the array indices b_idx and r_idx for each entry
781  * populated by this routine.
782  *
783  * The caller is responsible for allocating sufficient array entries.
784  */
785 static int
786 sbdp_prep_mc_idle_one(sbdp_bd_t *bp, sbdp_rename_script_t phys_banks[],
787     int *b_idx, sbdp_mc_idle_script_t mc_idle_regs[], int *r_idx)
788 {
789 	int		i, j;
790 	pnode_t		*memnodes;
791 	mc_regs_t	regs;
792 	uint64_t	addr;
793 	uint_t		asi;
794 	sbd_cond_t	sibling_cpu_cond;
795 	int		impl = -1;
796 
797 	memnodes = bp->nodes;
798 
799 	for (i = 0; i < SBDP_MAX_MEM_NODES_PER_BOARD; i++) {
800 		if (memnodes[i] == OBP_NONODE) {
801 			continue;
802 		}
803 
804 		/* MC should not be accessed if cpu has failed  */
805 		sibling_cpu_cond = mc_check_sibling_cpu(memnodes[i]);
806 		if (sibling_cpu_cond == SBD_COND_FAILED ||
807 		    sibling_cpu_cond == SBD_COND_UNUSABLE) {
808 			SBDP_DBG_MEM("sbdp: skipping MC with failed cpu: "
809 			    "board=%d, mem node=%d, condition=%d",
810 			    bp->bd, i, sibling_cpu_cond);
811 			continue;
812 		}
813 
814 		/*
815 		 * Initialize the board cpu type, assuming all board cpus are
816 		 * the same type.  This is true of all Cheetah-based processors.
817 		 * Failure to read the cpu type is considered a fatal error.
818 		 */
819 		if (impl == -1) {
820 			impl = mc_get_sibling_cpu_impl(memnodes[i]);
821 			if (impl == -1) {
822 				SBDP_DBG_MEM("sbdp: failed to get cpu impl "
823 				    "for MC dnode=0x%x\n", memnodes[i]);
824 				return (-1);
825 			}
826 		}
827 
828 		switch (impl) {
829 		case CHEETAH_IMPL:
830 		case CHEETAH_PLUS_IMPL:
831 		case JAGUAR_IMPL:
832 			if (mc_read_regs(memnodes[i], &regs)) {
833 				SBDP_DBG_MEM("sbdp: failed to read source "
834 				    "Decode Regs of board %d", bp->bd);
835 				return (-1);
836 			}
837 
838 			for (j = 0; j < SBDP_MAX_MCS_PER_NODE; j++) {
839 				uint64_t mc_decode = regs.mc_decode[j];
840 
841 				if ((mc_decode & SG_DECODE_VALID) !=
842 				    SG_DECODE_VALID) {
843 					continue;
844 				}
845 
846 				addr = (MC_BASE(mc_decode) << PHYS2UM_SHIFT) |
847 				    (MC_LM(mc_decode) << MC_LM_SHIFT);
848 
849 				phys_banks[*b_idx].masr_addr = addr;
850 				phys_banks[*b_idx].masr = 0;	/* unused */
851 				phys_banks[*b_idx].asi = ASI_MEM;
852 				(*b_idx)++;
853 			}
854 			break;
855 		case PANTHER_IMPL:
856 			if (mc_get_idle_reg(memnodes[i], &addr, &asi)) {
857 				return (-1);
858 			}
859 
860 			mc_idle_regs[*r_idx].addr = addr;
861 			mc_idle_regs[*r_idx].asi = asi;
862 			mc_idle_regs[*r_idx].node = memnodes[i];
863 			mc_idle_regs[*r_idx].bd_id = bp->bd;
864 			(*r_idx)++;
865 			break;
866 		default:
867 			cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
868 			    impl);
869 			ASSERT(0);
870 			return (-1);
871 		}
872 	}
873 
874 	return (0);
875 }
876 
877 /*
878  * For non-Panther MCs that do not support read-bypass-write, we do a read
879  * to each physical bank, relying on the reads to block until all outstanding
880  * write requests have completed.  This mechanism is referred to as the bus
881  * sync list and is used for Cheetah, Cheetah+, and Jaguar processors.  The
882  * bus sync list PAs for the source and target are kept together and comprise
883  * Section 1 of the rename script.
884  *
885  * For Panther processors that support the EMU Activity Status register,
886  * we ensure the writes have completed by polling the MCU_ACT_STATUS
887  * field several times to make sure the MC queues are empty.  The
888  * EMU Activity Status register PAs for the source and target are
889  * kept together and comprise Section 2 of the rename script.
890  */
891 static int
892 sbdp_prep_mc_idle_script(sbdp_bd_t *s_bp, sbdp_bd_t *t_bp,
893     sbdp_rename_script_t *rsp, int *rsp_idx)
894 {
895 	sbdp_rename_script_t *phys_banks;
896 	sbdp_mc_idle_script_t *mc_idle_regs;
897 	int	max_banks, max_regs;
898 	size_t	bsize, msize;
899 	int	nbanks = 0, nregs = 0;
900 	int	i;
901 
902 	/* CONSTCOND */
903 	ASSERT(sizeof (sbdp_rename_script_t) ==
904 	    sizeof (sbdp_mc_idle_script_t));
905 
906 	/* allocate space for both source and target */
907 	max_banks = SBDP_MAX_MEM_NODES_PER_BOARD *
908 	    SG_MAX_BANKS_PER_MC * 2;
909 	max_regs = SBDP_MAX_MEM_NODES_PER_BOARD * 2;
910 
911 	bsize = sizeof (sbdp_rename_script_t) * max_banks;
912 	msize = sizeof (sbdp_mc_idle_script_t) * max_regs;
913 
914 	phys_banks = kmem_zalloc(bsize, KM_SLEEP);
915 	mc_idle_regs = kmem_zalloc(msize, KM_SLEEP);
916 
917 	if (sbdp_prep_mc_idle_one(t_bp, phys_banks, &nbanks,
918 	    mc_idle_regs, &nregs) != 0 ||
919 	    sbdp_prep_mc_idle_one(s_bp, phys_banks, &nbanks,
920 	    mc_idle_regs, &nregs) != 0) {
921 		kmem_free(phys_banks, bsize);
922 		kmem_free(mc_idle_regs, msize);
923 		return (-1);
924 	}
925 
926 	/* section 1 */
927 	for (i = 0; i < nbanks; i++)
928 		rsp[(*rsp_idx)++] = phys_banks[i];
929 
930 	/* section 2 */
931 	for (i = 0; i < nregs; i++)
932 		rsp[(*rsp_idx)++] = *(sbdp_rename_script_t *)&mc_idle_regs[i];
933 
934 	kmem_free(phys_banks, bsize);
935 	kmem_free(mc_idle_regs, msize);
936 
937 	return (0);
938 }
939 
940 /*
941  * Code assumes a single mem-unit.
942  */
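/*
 * Rough layout of the script built below:
 *
 *	section 1: bus sync list reads for non-Panther MCs (ASI_MEM)
 *	section 2: EMU Activity Status polls for Panther MCs
 *	terminator (masr_addr == 0)
 *	MC decode rewrites: invalidate the target banks, invalidate the
 *	    source banks, then program the target banks with the source
 *	    base and the source banks with the target base
 *	terminator (masr_addr == 0)
 */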
943 static int
944 sbdp_prep_rename_script(sbdp_cr_handle_t *cph)
945 {
946 	pnode_t			*s_nodes, *t_nodes;
947 	int			m = 0, i;
948 	sbdp_bd_t		s_bd, t_bd, *s_bdp, *t_bdp;
949 	sbdp_rename_script_t	*rsp;
950 	uint64_t		new_base, old_base, temp_base;
951 	int			s_num, t_num;
952 
953 	mutex_enter(&cph->s_bdp->bd_mutex);
954 	s_bd = *cph->s_bdp;
955 	mutex_exit(&cph->s_bdp->bd_mutex);
956 	mutex_enter(&cph->t_bdp->bd_mutex);
957 	t_bd = *cph->t_bdp;
958 	mutex_exit(&cph->t_bdp->bd_mutex);
959 
960 	s_bdp = &s_bd;
961 	t_bdp = &t_bd;
962 	s_nodes = s_bdp->nodes;
963 	t_nodes = t_bdp->nodes;
964 	s_num = s_bdp->nnum;
965 	t_num = t_bdp->nnum;
966 	rsp = cph->script;
967 
968 	/*
969 	 * Calculate the new base address for the target bd
970 	 */
971 
972 	new_base = (s_bdp->bpa >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
973 
974 	/*
975 	 * Calculate the old base address for the source bd
976 	 */
977 
978 	old_base = (t_bdp->bpa >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
979 
980 	temp_base = SG_INVAL_UM;
981 
982 	SBDP_DBG_MEM("new_base 0x%lx old_base 0x%lx temp_base 0x%lx\n", new_base,
983 	    old_base, temp_base);
984 
985 	m = 0;
986 
987 	/*
988 	 * Ensure the MC queues have been idled on the source and target
989 	 * following the copy.
990 	 */
991 	if (sbdp_prep_mc_idle_script(s_bdp, t_bdp, rsp, &m) < 0)
992 		return (-1);
993 
994 	/*
995 	 * Script section terminator
996 	 */
997 	rsp[m].masr_addr = 0ull;
998 	rsp[m].masr = 0;
999 	rsp[m].asi = 0;
1000 	m++;
1001 
1002 	/*
1003 	 * Invalidate the base in the target mc registers
1004 	 */
1005 	for (i = 0; i < t_num; i++) {
1006 		if (sbdp_copy_regs(t_nodes[i], t_bdp->bpa, temp_base, 1, rsp,
1007 		    &m) < 0)
1008 			return (-1);
1009 	}
1010 	/*
1011 	 * Invalidate the base in the source mc registers
1012 	 */
1013 	for (i = 0; i < s_num; i++) {
1014 		if (sbdp_copy_regs(s_nodes[i], s_bdp->bpa, temp_base, 1, rsp,
1015 		    &m) < 0)
1016 			return (-1);
1017 	}
1018 	/*
1019 	 * Copy the new base into the targets mc registers
1020 	 */
1021 	for (i = 0; i < t_num; i++) {
1022 		if (sbdp_copy_regs(t_nodes[i], t_bdp->bpa, new_base, 0, rsp,
1023 		    &m) < 0)
1024 			return (-1);
1025 	}
1026 	/*
1027 	 * Copy the old base into the source mc registers
1028 	 */
1029 	for (i = 0; i < s_num; i++) {
1030 		if (sbdp_copy_regs(s_nodes[i], s_bdp->bpa, old_base, 0, rsp,
1031 		    &m) < 0)
1032 			return (-1);
1033 	}
1034 	/*
1035 	 * Zero masr_addr value indicates the END.
1036 	 */
1037 	rsp[m].masr_addr = 0ull;
1038 	rsp[m].masr = 0;
1039 	rsp[m].asi = 0;
1040 	m++;
1041 
1042 #ifdef DEBUG
1043 	{
1044 		int	i;
1045 
1046 		SBDP_DBG_MEM("dumping copy-rename script:\n");
1047 		for (i = 0; i < m; i++) {
1048 			SBDP_DBG_MEM("0x%lx = 0x%lx, asi 0x%x\n",
1049 				rsp[i].masr_addr, rsp[i].masr, rsp[i].asi);
1050 		}
1051 		DELAY(1000000);
1052 	}
1053 #endif /* DEBUG */
1054 
1055 	return (m * sizeof (sbdp_rename_script_t));
1056 }
1057 
1058 /*
1059  * The EMU Activity Status Register must read as idle several times in a row.
1060  * See Panther PRM 12.5.
1061  */
1062 #define	SBDP_MCU_IDLE_RETRIES	10
1063 #define	SBDP_MCU_IDLE_READS	3
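/*
 * That is, the register must read as idle SBDP_MCU_IDLE_READS times in
 * succession, retried up to SBDP_MCU_IDLE_RETRIES times before the MC is
 * declared busy.
 */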
1064 
1065 /*
1066  * Using the "__relocatable" suffix informs DTrace providers (and anything
1067  * else, for that matter) that this function's text may be manually relocated
1068  * elsewhere before it is executed.  That is, it cannot be safely instrumented
1069  * with any methodology that is PC-relative.
1070  */
1071 static int
1072 sbdp_copy_rename__relocatable(sbdp_cr_handle_t *hp, struct memlist *mlist,
1073 		register sbdp_rename_script_t *rsp)
1074 {
1075 	sbdp_cr_err_t	err = SBDP_CR_OK;
1076 	size_t		csize;
1077 	size_t		linesize;
1078 	uint_t		size;
1079 	uint64_t	caddr;
1080 	uint64_t	s_base, t_base;
1081 	sbdp_bd_t	*s_sbp, *t_sbp;
1082 	struct memlist	*ml;
1083 	sbdp_mc_idle_script_t *isp;
1084 	int		i;
1085 
1086 	caddr = ecache_flushaddr;
1087 	csize = (size_t)(cpunodes[CPU->cpu_id].ecache_size * 2);
1088 	linesize = (size_t)(cpunodes[CPU->cpu_id].ecache_linesize);
1089 
1090 	size = 0;
1091 	s_sbp = hp->s_bdp;
1092 	t_sbp = hp->t_bdp;
1093 
1094 	s_base = (uint64_t)s_sbp->bpa;
1095 	t_base = (uint64_t)t_sbp->bpa;
1096 
1097 	hp->ret = s_base;
1098 	/*
1099 	 * DO COPY.
1100 	 */
1101 	for (ml = mlist; ml; ml = ml->next) {
1102 		uint64_t	s_pa, t_pa;
1103 		uint64_t	nbytes;
1104 
1105 		s_pa = ml->address;
1106 		t_pa = t_base + (ml->address - s_base);
1107 		nbytes = ml->size;
1108 
1109 		size += nbytes;
1110 		while (nbytes != 0ull) {
1111 			/*
1112 			 * This copy does NOT use an ASI
1113 			 * that avoids the Ecache, therefore
1114 			 * the dst_pa addresses may remain
1115 			 * in our Ecache after the dst_pa
1116 			 * has been removed from the system.
1117 			 * A subsequent write-back to memory
1118 			 * will cause an ARB-stop because the
1119 			 * physical address no longer exists
1120 			 * in the system. Therefore we must
1121 			 * flush out local Ecache after we
1122 			 * finish the copy.
1123 			 */
1124 
1125 			/* copy 32 bytes at src_pa to dst_pa */
1126 			bcopy32_il(s_pa, t_pa);
1127 
1128 			/* increment by 32 bytes */
1129 			s_pa += (4 * sizeof (uint64_t));
1130 			t_pa += (4 * sizeof (uint64_t));
1131 
1132 			/* decrement by 32 bytes */
1133 			nbytes -= (4 * sizeof (uint64_t));
1134 		}
1135 	}
1136 
1137 	/*
1138 	 * Since bcopy32_il() does NOT use an ASI to bypass
1139 	 * the Ecache, we need to flush our Ecache after
1140 	 * the copy is complete.
1141 	 */
1142 	flush_ecache_il(caddr, csize, linesize);	/* inline version */
1143 
1144 	/*
1145 	 * Non-Panther MCs are idled by reading each physical bank.
1146 	 */
1147 	for (i = 0; rsp[i].asi == ASI_MEM; i++) {
1148 		(void) lddphys_il(rsp[i].masr_addr);
1149 	}
1150 
1151 	isp = (sbdp_mc_idle_script_t *)&rsp[i];
1152 
1153 	/*
1154 	 * Panther MCs are idled by polling until the MCU idle state
1155 	 * is read SBDP_MCU_IDLE_READS times in succession.
1156 	 */
1157 	while (isp->addr != 0ull) {
1158 		for (i = 0; i < SBDP_MCU_IDLE_RETRIES; i++) {
1159 			register uint64_t v;
1160 			register int n_idle = 0;
1161 
1162 
1163 			do {
1164 				v = ldxasi_il(isp->addr, isp->asi) &
1165 				    MCU_ACT_STATUS;
1166 			} while (v != MCU_ACT_STATUS &&
1167 			    ++n_idle < SBDP_MCU_IDLE_READS);
1168 
1169 			if (n_idle == SBDP_MCU_IDLE_READS)
1170 				break;
1171 		}
1172 
1173 		if (i == SBDP_MCU_IDLE_RETRIES) {
1174 			/* bailout */
1175 			hp->busy_mc = isp;
1176 			return (SBDP_CR_MC_IDLE_ERR);
1177 		}
1178 
1179 		isp++;
1180 	}
1181 
1182 	/* skip terminator */
1183 	isp++;
1184 
1185 	/*
1186 	 * The following inline assembly routine caches
1187 	 * the rename script and then caches the code that
1188 	 * will do the rename.  This is necessary
1189 	 * so that we don't have any memory references during
1190 	 * the reprogramming.  We accomplish this by first
1191 	 * jumping through the code to guarantee it's cached
1192 	 * before we actually execute it.
1193 	 */
1194 	sbdp_exec_script_il((sbdp_rename_script_t *)isp);
1195 
1196 	return (err);
1197 }
1198 static void
1199 _sbdp_copy_rename_end(void)
1200 {
1201 	/*
1202 	 * IMPORTANT:   This function MUST be located immediately following
1203 	 *		sbdp_copy_rename__relocatable so that the latter's size
1204 	 *		can be estimated accurately.  Note that this assumes (!) the
1205 	 *		compiler keeps these functions in the order in which
1206 	 *		they appear :-o
1207 	 */
1208 }
1209 int
1210 sbdp_memory_rename(sbdp_handle_t *hp)
1211 {
1212 #ifdef lint
1213 	/*
1214 	 * Delete when implemented
1215 	 */
1216 	hp = hp;
1217 #endif
1218 	return (0);
1219 }
1220 
1221 
1222 /*
1223  * In Serengeti this is a nop
1224  */
1225 int
1226 sbdp_post_configure_mem(sbdp_handle_t *hp)
1227 {
1228 #ifdef lint
1229 	hp = hp;
1230 #endif
1231 	return (0);
1232 }
1233 
1234 /*
1235  * In Serengeti this is a nop
1236  */
1237 int
1238 sbdp_post_unconfigure_mem(sbdp_handle_t *hp)
1239 {
1240 #ifdef lint
1241 	hp = hp;
1242 #endif
1243 	return (0);
1244 }
1245 
1246 /* ARGSUSED */
1247 int
1248 sbdphw_disable_memctrl(sbdp_handle_t *hp, dev_info_t *dip)
1249 {
1250 	return (0);
1251 }
1252 
1253 /* ARGSUSED */
1254 int
1255 sbdphw_enable_memctrl(sbdp_handle_t *hp, dev_info_t *dip)
1256 {
1257 	return (0);
1258 }
1259 
1260 /*
1261  * We are assuming one memory node, so the base address is the lowest
1262  * possible segment address
1263  */
1264 #define	PA_ABOVE_MAX	(0x8000000000000000ull)
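/* PA_ABOVE_MAX is a sentinel, larger than any physical address we expect */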
1265 int
1266 sbdphw_get_base_physaddr(sbdp_handle_t *hp, dev_info_t *dip, uint64_t *pa)
1267 {
1268 	_NOTE(ARGUNUSED(hp))
1269 
1270 	int i, board = -1, wnode;
1271 	pnode_t	nodeid;
1272 	struct mem_arg arg = {0};
1273 	uint64_t seg_pa, tmp_pa;
1274 	dev_info_t *list[SBDP_MAX_MEM_NODES_PER_BOARD];
1275 	int rc;
1276 
1277 	if (dip == NULL)
1278 		return (-1);
1279 
1280 	nodeid = ddi_get_nodeid(dip);
1281 
1282 	if (sbdp_get_bd_and_wnode_num(nodeid, &board, &wnode) < 0)
1283 		return (-1);
1284 
1285 	list[0] = NULL;
1286 	arg.board = board;
1287 	arg.list = list;
1288 
1289 	(void) sbdp_walk_prom_tree(prom_rootnode(), sbdp_get_mem_dip, &arg);
1290 
1291 	if (arg.ndips <= 0)
1292 		return (-1);
1293 
1294 	seg_pa = PA_ABOVE_MAX;
1295 
1296 	rc = -1;
1297 	for (i = 0; i < arg.ndips; i++) {
1298 		if (list[i] == NULL)
1299 			continue;
1300 		if (sbdp_get_lowest_addr_in_node(ddi_get_nodeid(list[i]),
1301 		    &tmp_pa) == 0) {
1302 			rc = 0;
1303 			if (tmp_pa < seg_pa)
1304 				seg_pa = tmp_pa;
1305 		}
1306 
1307 		/*
1308 		 * Release hold acquired in sbdp_get_mem_dip()
1309 		 */
1310 		ddi_release_devi(list[i]);
1311 	}
1312 
1313 	if (rc == 0)
1314 		*pa = seg_pa;
1315 	else {
1316 		/*
1317 		 * Record the fact that an error has occurred
1318 		 */
1319 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
1320 	}
1321 
1322 	return (rc);
1323 }
1324 
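/*
 * Return, in *pa, the lowest base address among the valid banks decoded by
 * this mc node; returns -1 if there are no valid banks or the decode
 * registers cannot be read.
 */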
1325 static int
1326 sbdp_get_lowest_addr_in_node(pnode_t node, uint64_t *pa)
1327 {
1328 	uint64_t	mc_decode, seg_pa, tmp_pa;
1329 	mc_regs_t	mc_regs, *mc_regsp = &mc_regs;
1330 	int		i, valid;
1331 	int		rc;
1332 
1333 
1334 	seg_pa = PA_ABOVE_MAX;
1335 
1336 	if (mc_read_regs(node, mc_regsp)) {
1337 		SBDP_DBG_MEM("sbdp_get_lowest_addr_in_node: failed to "
1338 		    "read source Decode Regs\n");
1339 		return (-1);
1340 	}
1341 
1342 	rc = -1;
1343 	for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
1344 		mc_decode = mc_regsp->mc_decode[i];
1345 		valid = mc_decode >> MC_VALID_SHIFT;
1346 		tmp_pa = MC_BASE(mc_decode) << PHYS2UM_SHIFT;
1347 		if (valid)
1348 			rc = 0;
1349 		if (valid && (tmp_pa < seg_pa))
1350 			seg_pa = tmp_pa;
1351 	}
1352 
1353 	if (rc == 0)
1354 		*pa = seg_pa;
1355 
1356 	return (rc);
1357 }
1358 
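/*
 * Walker callback: if `node' is a healthy memory-controller node on the
 * board recorded in the mem_op_t passed as `arg', append it to the
 * caller's node list.  Returns DDI_SUCCESS only for recorded nodes.
 */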
1359 int
1360 sbdp_is_mem(pnode_t node, void *arg)
1361 {
1362 	mem_op_t	*memp = (mem_op_t *)arg;
1363 	char		type[OBP_MAXPROPNAME];
1364 	int		bd;
1365 	pnode_t		*list;
1366 	int		board;
1367 	char		name[OBP_MAXDRVNAME];
1368 	int		len;
1369 
1370 	ASSERT(memp);
1371 
1372 	list = memp->nodes;
1373 	board = memp->board;
1374 
1375 	/*
1376 	 * Make sure that this node doesn't have its status
1377 	 * as failed
1378 	 */
1379 	if (sbdp_get_comp_status(node) != SBD_COND_OK) {
1380 		return (DDI_FAILURE);
1381 	}
1382 
1383 	len = prom_getproplen(node, "device_type");
1384 	if ((len > 0) && (len < OBP_MAXPROPNAME))
1385 		(void) prom_getprop(node, "device_type", (caddr_t)type);
1386 	else
1387 		type[0] = '\0';
1388 
1389 	if (strcmp(type, "memory-controller") == 0) {
1390 		int	wnode;
1391 
1392 		if (sbdp_get_bd_and_wnode_num(node, &bd, &wnode) < 0)
1393 			return (DDI_FAILURE);
1394 
1395 		if (bd == board) {
1396 			/*
1397 			 * Make sure we don't overwrite the array
1398 			 */
1399 			if (memp->nmem >= SBDP_MAX_MEM_NODES_PER_BOARD)
1400 				return (DDI_FAILURE);
1401 			(void) prom_getprop(node, OBP_NAME, (caddr_t)name);
1402 			SBDP_DBG_MEM("name %s  boot bd %d board %d\n", name,
1403 			    board, bd);
1404 			list[memp->nmem++] = node;
1405 			return (DDI_SUCCESS);
1406 		}
1407 	}
1408 
1409 	return (DDI_FAILURE);
1410 }
1411 
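/*
 * Return the size and base PA of logical bank `mc' on the given mc node.
 * *size and *base_pa are only written if the bank's valid bit is set.
 */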
1412 static int
1413 sbdp_get_meminfo(pnode_t nodeid, int mc, uint64_t *size, uint64_t *base_pa)
1414 {
1415 	int		board, wnode;
1416 	int		valid;
1417 	mc_regs_t	mc_regs, *mc_regsp = &mc_regs;
1418 	uint64_t	mc_decode = 0;
1419 
1420 	if (sbdp_get_bd_and_wnode_num(nodeid, &board, &wnode) < 0)
1421 		return (-1);
1422 
1423 	if (mc_read_regs(nodeid, mc_regsp)) {
1424 		SBDP_DBG_MEM("sbdp_get_meminfo: failed to read source "
1425 		    "Decode Regs");
1426 		return (-1);
1427 	}
1428 	/*
1429 	 * Calculate memory size
1430 	 */
1431 	mc_decode = mc_regsp->mc_decode[mc];
1432 
1433 	/*
1434 	 * Check the valid bit to see if bank is there
1435 	 */
1436 	valid = mc_decode >> MC_VALID_SHIFT;
1437 	if (valid) {
1438 		*size = MC_UK2SPAN(mc_decode);
1439 		*base_pa = MC_BASE(mc_decode) << PHYS2UM_SHIFT;
1440 	}
1441 
1442 	return (0);
1443 }
1444 
1445 
1446 /*
1447  * Luckily for us mem nodes and cpu/CMP nodes are siblings.  All we need to
1448  * do is search in the same branch as the mem node for its sibling cpu or
1449  * CMP node.
1450  */
1451 pnode_t
1452 mc_get_sibling_cpu(pnode_t nodeid)
1453 {
1454 	int	portid;
1455 
1456 	if (prom_getprop(nodeid, OBP_PORTID, (caddr_t)&portid) < 0)
1457 		return (OBP_NONODE);
1458 
1459 	/*
1460 	 * cpus and memory are siblings so we don't need to traverse
1461 	 * the whole tree, just a branch
1462 	 */
1463 	return (sbdp_find_nearby_cpu_by_portid(nodeid, portid));
1464 }
1465 
1466 /*
1467  * Given a memory node, check its sibling cpu or CMP to see if
1468  * access to mem will be ok. We need to search for the node and
1469  * if found get its condition.
1470  */
1471 sbd_cond_t
1472 mc_check_sibling_cpu(pnode_t nodeid)
1473 {
1474 	pnode_t	cpu_node;
1475 	sbd_cond_t	cond;
1476 	int		i;
1477 
1478 	cpu_node = mc_get_sibling_cpu(nodeid);
1479 
1480 	cond = sbdp_get_comp_status(cpu_node);
1481 
1482 	if (cond == SBD_COND_OK) {
1483 		int 		wnode;
1484 		int		bd;
1485 		int		unit;
1486 		int		portid;
1487 
1488 		if (sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) < 0)
1489 			return (SBD_COND_UNKNOWN);
1490 
1491 		(void) prom_getprop(nodeid, OBP_PORTID, (caddr_t)&portid);
1492 
1493 		/*
1494 		 * Access to the memory controller should not
1495 		 * be attempted if any of the cores are marked
1496 		 * as being in reset.
1497 		 */
1498 		for (i = 0; i < SBDP_MAX_CORES_PER_CMP; i++) {
1499 			unit = SG_PORTID_TO_CPU_UNIT(portid, i);
1500 			if (sbdp_is_cpu_present(wnode, bd, unit) &&
1501 			    sbdp_is_cpu_in_reset(wnode, bd, unit)) {
1502 				cond = SBD_COND_UNUSABLE;
1503 				break;
1504 			}
1505 		}
1506 	}
1507 
1508 	return (cond);
1509 }
1510 
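/*
 * Read all of the MC decode registers for the given mc node, via the MC
 * decode ASI if the MC is local to the current cpu, or via physical I/O
 * space otherwise.
 */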
1511 int
1512 mc_read_regs(pnode_t nodeid, mc_regs_t *mc_regsp)
1513 {
1514 	int			len;
1515 	uint64_t		mc_addr, mask;
1516 	mc_regspace		reg;
1517 	sbd_cond_t		sibling_cpu_cond;
1518 	int			local_mc;
1519 	int			portid;
1520 	int			i;
1521 
1522 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
1523 	    (portid == -1))
1524 		return (-1);
1525 
1526 	/*
1527 	 * The mc should not be accessed if its corresponding cpu
1528 	 * has failed.
1529 	 */
1530 	sibling_cpu_cond = mc_check_sibling_cpu(nodeid);
1531 
1532 	if ((sibling_cpu_cond == SBD_COND_FAILED) ||
1533 	    (sibling_cpu_cond == SBD_COND_UNUSABLE)) {
1534 		return (-1);
1535 	}
1536 
1537 	len = prom_getproplen(nodeid, "reg");
1538 	if (len != sizeof (mc_regspace))
1539 		return (-1);
1540 
1541 	if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
1542 		return (-1);
1543 
1544 	mc_addr = ((uint64_t)reg.regspec_addr_hi) << 32;
1545 	mc_addr |= (uint64_t)reg.regspec_addr_lo;
1546 
1547 	/*
1548 	 * Make sure we don't switch cpus
1549 	 */
1550 	affinity_set(CPU_CURRENT);
1551 	if (portid == cpunodes[CPU->cpu_id].portid)
1552 		local_mc = 1;
1553 	else
1554 		local_mc = 0;
1555 
1556 	for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
1557 		mask = SG_REG_2_OFFSET(i);
1558 
1559 		/*
1560 		 * If the memory controller is local to this CPU, we use
1561 		 * the special ASI to read the decode registers.
1562 		 * Otherwise, we load the values from a magic address in
1563 		 * I/O space.
1564 		 */
1565 		if (local_mc) {
1566 			mc_regsp->mc_decode[i] = lddmcdecode(
1567 			    mask & MC_OFFSET_MASK);
1568 		} else {
1569 			mc_regsp->mc_decode[i] = lddphysio(
1570 			    (mc_addr | mask));
1571 		}
1572 	}
1573 	affinity_clear();
1574 
1575 	return (0);
1576 }
1577 
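/*
 * Return the address (and, in *asi, the ASI) with which the current cpu
 * should access MC decode register `mc' of the given node.
 */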
1578 uint64_t
1579 mc_get_addr(pnode_t nodeid, int mc, uint_t *asi)
1580 {
1581 	int			len;
1582 	uint64_t		mc_addr, addr;
1583 	mc_regspace		reg;
1584 	int			portid;
1585 	int			local_mc;
1586 
1587 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
1588 	    (portid == -1))
1589 		return (-1);
1590 
1591 	len = prom_getproplen(nodeid, "reg");
1592 	if (len != sizeof (mc_regspace))
1593 		return (-1);
1594 
1595 	if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
1596 		return (-1);
1597 
1598 	mc_addr = ((uint64_t)reg.regspec_addr_hi) << 32;
1599 	mc_addr |= (uint64_t)reg.regspec_addr_lo;
1600 
1601 	/*
1602 	 * Make sure we don't switch cpus
1603 	 */
1604 	affinity_set(CPU_CURRENT);
1605 	if (portid == cpunodes[CPU->cpu_id].portid)
1606 		local_mc = 1;
1607 	else
1608 		local_mc = 0;
1609 
1610 	if (local_mc) {
1611 		*asi = ASI_MC_DECODE;
1612 		addr = SG_REG_2_OFFSET(mc) & MC_OFFSET_MASK;
1613 	} else {
1614 		*asi = ASI_IO;
1615 		addr = SG_REG_2_OFFSET(mc) | mc_addr;
1616 	}
1617 	affinity_clear();
1618 
1619 	return (addr);
1620 }
1621 
1622 /* ARGSUSED */
1623 int
1624 sbdp_mem_add_span(sbdp_handle_t *hp, uint64_t address, uint64_t size)
1625 {
1626 	return (0);
1627 }
1628 
1629 int
1630 sbdp_mem_del_span(sbdp_handle_t *hp, uint64_t address, uint64_t size)
1631 {
1632 	pfn_t		 basepfn = (pfn_t)(address >> PAGESHIFT);
1633 	pgcnt_t		 npages = (pgcnt_t)(size >> PAGESHIFT);
1634 
1635 	if (size > 0) {
1636 		int rv;
1637 		kcage_range_lock();
1638 		rv = kcage_range_delete_post_mem_del(basepfn, npages);
1639 		kcage_range_unlock();
1640 		if (rv != 0) {
1641 			cmn_err(CE_WARN,
1642 			    "unexpected kcage_range_delete_post_mem_del"
1643 			    " return value %d", rv);
1644 			sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
1645 			return (-1);
1646 		}
1647 	}
1648 	return (0);
1649 }
1650 
1651 /*
1652  * This routine gets the size including the
1653  * bad banks
1654  */
1655 int
1656 sbdp_get_mem_size(sbdp_handle_t *hp)
1657 {
1658 	uint64_t	size = 0;
1659 	struct memlist	*mlist, *ml;
1660 
1661 	mlist = sbdp_get_memlist(hp, (dev_info_t *)NULL);
1662 
1663 	for (ml = mlist; ml; ml = ml->next)
1664 		size += ml->size;
1665 
1666 	(void) sbdp_del_memlist(hp, mlist);
1667 
1668 	SBDP_DBG_MEM("sbdp_get_mem_size: size 0x%" PRIx64 "\n", size);
1669 
1670 	return (btop(size));
1671 }
1672 
1673 /*
1674  * This function compares the list of banks passed with the banks
1675  * in the segment
1676  */
1677 int
1678 sbdp_check_seg_with_banks(sbdp_seg_t *seg, sbdp_bank_t *banks)
1679 {
1680 	sbdp_bank_t	*cur_bank, *bank;
1681 	int		i = 0;
1682 
1683 	for (cur_bank = seg->banks; cur_bank; cur_bank = cur_bank->seg_next) {
1684 		for (bank = banks; bank; bank = bank->bd_next) {
1685 			if (!bank->valid)
1686 				continue;
1687 
1688 			if (cur_bank == bank) {
1689 				i++;
1690 			}
1691 		}
1692 	}
1693 
1694 	SBDP_DBG_MEM("banks found = %d total banks = %d\n", i, seg->nbanks);
1695 	/*
1696 	 * If the number of matching banks equals the number of banks in the
1697 	 * segment, then this segment is not interleaved across boards
1698 	 */
1699 	if (i == seg->nbanks)
1700 		return (0);
1701 
1702 	return (1);
1703 }
1704 
1705 
1706 /*
1707  * This routine determines if any of the memory banks on the board
1708  * participate in across board memory interleaving
1709  */
1710 int
1711 sbdp_isinterleaved(sbdp_handle_t *hp, dev_info_t *dip)
1712 {
1713 	_NOTE(ARGUNUSED(dip))
1714 
1715 	sbdp_bank_t	*bankp;
1716 	int		wnode, board;
1717 	int		is_interleave = 0;
1718 	sbdp_bd_t	*bdp;
1719 	uint64_t	base;
1720 	sbdp_seg_t	*seg;
1721 
1722 	board = hp->h_board;
1723 	wnode = hp->h_wnode;
1724 
1725 #ifdef DEBUG
1726 	sbdp_print_all_segs();
1727 #endif
1728 	/*
1729 	 * Get the banks for this board
1730 	 */
1731 	bdp = sbdp_get_bd_info(wnode, board);
1732 
1733 	if (bdp == NULL)
1734 		return (-1);
1735 
1736 	/*
1737 	 * Search for the first bank with valid memory
1738 	 */
1739 	for (bankp = bdp->banks; bankp; bankp = bankp->bd_next)
1740 		if (bankp->valid)
1741 			break;
1742 
1743 	/*
1744 	 * If there are no valid banks on the board, then the board is
1745 	 * not interleaved across boards
1746 	 */
1747 	if (bankp == NULL) {
1748 		return (0);
1749 	}
1750 
1751 	base = bankp->um & ~(bankp->uk);
1752 
1753 	/*
1754 	 * Find the segment for the first bank
1755 	 */
1756 	if ((seg = sbdp_get_seg(base)) == NULL) {
1757 		/*
1758 		 * Something bad has happened.
1759 		 */
1760 		return (-1);
1761 	}
1762 	/*
1763 	 * Make sure that this segment is only composed of the banks
1764 	 * on this board.  If one is missing or we have an extra one,
1765 	 * the board is interleaved across boards
1766 	 */
1767 	is_interleave = sbdp_check_seg_with_banks(seg, bdp->banks);
1768 
1769 	SBDP_DBG_MEM("interleave is %d\n", is_interleave);
1770 
1771 	return (is_interleave);
1772 }
1773 
1774 
1775 /*
1776  * Each node has 4 logical banks.  This routine adds all the banks (including
1777  * the invalid ones) to the passed list. Note that we use the bd list and not
1778  * the seg list
1779  */
1780 int
1781 sbdp_add_nodes_banks(pnode_t node, sbdp_bank_t **banks)
1782 {
1783 	int		i;
1784 	mc_regs_t	regs;
1785 	uint64_t	*mc_decode;
1786 	sbdp_bank_t 	*bank;
1787 
1788 	if (mc_read_regs(node, &regs) == -1)
1789 		return (-1);
1790 
1791 	mc_decode = regs.mc_decode;
1792 
1793 	for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
1794 		/*
1795 		 * This creates the mem for the new member of the list
1796 		 */
1797 		sbdp_fill_bank_info(mc_decode[i], &bank);
1798 
1799 		SBDP_DBG_MEM("adding bank %d\n", bank->id);
1800 
1801 		/*
1802 		 * Insert bank into the beginning of the list
1803 		 */
1804 		bank->bd_next = *banks;
1805 		*banks = bank;
1806 
1807 		/*
1808 		 * Add this bank into its corresponding
1809 		 * segment
1810 		 */
1811 		sbdp_add_bank_to_seg(bank);
1812 	}
1813 	return (0);
1814 }
1815 
1816 /*
1817  * Given the mc decode value, create a new bank node and fill it in
1818  * as appropriate.  We allocate the memory for the bank; it is
1819  * up to the caller to ensure the memory is freed
1820  */
1821 void
1822 sbdp_fill_bank_info(uint64_t mc_decode, sbdp_bank_t **bank)
1823 {
1824 	static int	id = 0;
1825 	sbdp_bank_t	*new;
1826 
1827 	new = kmem_zalloc(sizeof (sbdp_bank_t), KM_SLEEP);
1828 
1829 	new->id = id++;
1830 	new->valid = (mc_decode >> MC_VALID_SHIFT);
1831 	new->uk = MC_UK(mc_decode);
1832 	new->um = MC_UM(mc_decode);
1833 	new->lk = MC_LK(mc_decode);
1834 	new->lm = MC_LM(mc_decode);
1835 	new->bd_next = NULL;
1836 	new->seg_next = NULL;
1837 
1838 	*bank = new;
1839 }
1840 
1841 /*
1842  * Each bd has the potential of having mem banks on it.  The banks
1843  * may be empty or not.  This routine gets all the mem banks
1844  * for this bd
1845  */
1846 void
1847 sbdp_init_bd_banks(sbdp_bd_t *bdp)
1848 {
1849 	int		i, nmem;
1850 	pnode_t		*lists;
1851 
1852 	lists = bdp->nodes;
1853 	nmem = bdp->nnum;
1854 
1855 	if (bdp->banks != NULL) {
1856 		return;
1857 	}
1858 
1859 	bdp->banks = NULL;
1860 
1861 	for (i = 0; i < nmem; i++) {
1862 		(void) sbdp_add_nodes_banks(lists[i], &bdp->banks);
1863 	}
1864 }
1865 
1866 /*
1867  * swap the list of banks for the 2 boards
1868  */
1869 void
1870 sbdp_swap_list_of_banks(sbdp_bd_t *bdp1, sbdp_bd_t *bdp2)
1871 {
1872 	sbdp_bank_t	*tmp_ptr;
1873 
1874 	if ((bdp1 == NULL) || (bdp2 == NULL))
1875 		return;
1876 
1877 	tmp_ptr = bdp1->banks;
1878 	bdp1->banks = bdp2->banks;
1879 	bdp2->banks = tmp_ptr;
1880 }
1881 
1882 /*
1883  * free all the banks on the board.  Note that a bank node belongs
1884  * to 2 lists. The first list is the board list. The second one is
1885  * the seg list. We need to remove the bank from both lists but
1886  * free the node only once.
1887  */
1888 void
1889 sbdp_fini_bd_banks(sbdp_bd_t *bdp)
1890 {
1891 	sbdp_bank_t	*bkp, *nbkp;
1892 
1893 	for (bkp = bdp->banks; bkp; ) {
1894 		/*
1895 		 * Remove the bank from the seg list first
1896 		 */
1897 		SBDP_DBG_MEM("Removing bank %d\n", bkp->id);
1898 		sbdp_remove_bank_from_seg(bkp);
1899 		nbkp = bkp->bd_next;
1900 		bkp->bd_next = NULL;
1901 		kmem_free(bkp, sizeof (sbdp_bank_t));
1902 
1903 		bkp = nbkp;
1904 	}
1905 	bdp->banks = NULL;
1906 }
1907 
1908 #ifdef DEBUG
1909 void
1910 sbdp_print_bd_banks(sbdp_bd_t *bdp)
1911 {
1912 	sbdp_bank_t	*bp;
1913 	int		i;
1914 
1915 	SBDP_DBG_MEM("BOARD %d\n", bdp->bd);
1916 
1917 	for (bp = bdp->banks, i = 0; bp; bp = bp->bd_next, i++) {
1918 		SBDP_DBG_MEM("BANK [%d]:\n", bp->id);
1919 		SBDP_DBG_MEM("\tvalid %d\tuk 0x%x\tum 0x%x\tlk 0x%x"
1920 		    "\tlm 0x%x\n", bp->valid, bp->uk, bp->um,
1921 		    bp->lk, bp->lm);
1922 	}
1923 }
1924 
1925 void
1926 sbdp_print_all_segs(void)
1927 {
1928 	sbdp_seg_t	*cur_seg;
1929 
1930 	for (cur_seg = sys_seg; cur_seg; cur_seg = cur_seg->next)
1931 		sbdp_print_seg(cur_seg);
1932 }
1933 
1934 void
1935 sbdp_print_seg(sbdp_seg_t *seg)
1936 {
1937 	sbdp_bank_t	*bp;
1938 	int		i;
1939 
1940 	SBDP_DBG_MEM("SEG %d\n", seg->id);
1941 
1942 	for (bp = seg->banks, i = 0; bp; bp = bp->seg_next, i++) {
1943 		SBDP_DBG_MEM("BANK [%d]:\n", bp->id);
1944 		SBDP_DBG_MEM("\tvalid %d\tuk 0x%x\tum 0x%x\tlk 0x%x"
1945 		    "\tlm 0x%x\n", bp->valid, bp->uk, bp->um,
1946 		    bp->lk, bp->lm);
1947 	}
1948 }
1949 #endif
1950 
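/*
 * Add a valid bank to the segment containing its base address, creating
 * the segment if it does not exist yet.  The segment base is the bank's
 * UM field with the UK mask bits cleared; the segment size and interleave
 * factor are derived from the UK and LK masks respectively.
 */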
1951 void
1952 sbdp_add_bank_to_seg(sbdp_bank_t *bank)
1953 {
1954 	uint64_t	base;
1955 	sbdp_seg_t	*cur_seg;
1956 	static int	id = 0;
1957 
1958 	/*
1959 	 * if we got an invalid bank just skip it
1960 	 */
1961 	if (bank == NULL || !bank->valid)
1962 		return;
1963 	base = bank->um & ~(bank->uk);
1964 
1965 	if ((cur_seg = sbdp_get_seg(base)) == NULL) {
1966 		/*
1967 		 * This bank is part of a new segment, so create
1968 		 * a struct for it and add it to the list of segments
1969 		 */
1970 		cur_seg = kmem_zalloc(sizeof (sbdp_seg_t), KM_SLEEP);
1971 		cur_seg->id = id++;
1972 		cur_seg->base = base;
1973 		cur_seg->size = ((bank->uk + 1) << PHYS2UM_SHIFT);
1974 		cur_seg->intlv = ((bank->lk ^ 0xF) + 1);
1975 		/*
1976 		 * add to the seg list
1977 		 */
1978 		cur_seg->next = sys_seg;
1979 		sys_seg = cur_seg;
1980 	}
1981 
1982 	cur_seg->nbanks++;
1983 	/*
1984 	 * add the bank to the seg's bank list.  Note we add at the head
1985 	 */
1986 	bank->seg_next = cur_seg->banks;
1987 	cur_seg->banks = bank;
1988 }
1989 
1990 /*
1991  * Remove this segment from the seg list
1992  */
1993 void
1994 sbdp_rm_seg(sbdp_seg_t *seg)
1995 {
1996 	sbdp_seg_t	**curpp, *curp;
1997 
1998 	curpp = &sys_seg;
1999 
2000 	while ((curp = *curpp) != NULL) {
2001 		if (curp == seg) {
2002 			*curpp = curp->next;
2003 			break;
2004 		}
2005 		curpp = &curp->next;
2006 	}
2007 
2008 	if (curp != NULL) {
2009 		kmem_free(curp, sizeof (sbdp_seg_t));
2010 		curp = NULL;
2011 	}
2012 }
2013 
2014 /*
2015  * remove this bank from its seg list
2016  */
2017 void
2018 sbdp_remove_bank_from_seg(sbdp_bank_t *bank)
2019 {
2020 	uint64_t	base;
2021 	sbdp_seg_t	*cur_seg;
2022 	sbdp_bank_t	**curpp, *curp;
2023 
2024 	/*
2025 	 * if we got an invalid bank just skip it
2026 	 */
2027 	if (bank == NULL || !bank->valid)
2028 		return;
2029 	base = bank->um & ~(bank->uk);
2030 
2031 	/*
2032 	 * If the bank doesn't belong to any seg just return
2033 	 */
2034 	if ((cur_seg = sbdp_get_seg(base)) == NULL) {
2035 		SBDP_DBG_MEM("bank %d with no segment\n", bank->id);
2036 		return;
2037 	}
2038 
2039 	/*
2040 	 * Find bank in the seg
2041 	 */
2042 	curpp = &cur_seg->banks;
2043 
2044 	while ((curp = *curpp) != NULL) {
2045 		if (curp->id == bank->id) {
2046 			/*
2047 			 * found node, remove it
2048 			 */
2049 			*curpp = curp->seg_next;
2050 			break;
2051 		}
2052 		curpp = &curp->seg_next;
2053 	}
2054 
2055 	if (curp != NULL) {
2056 		cur_seg->nbanks--;
2057 	}
2058 
2059 	if (cur_seg->nbanks == 0) {
2060 		/*
2061 		 * No banks left on this segment, remove the segment
2062 		 */
2063 		SBDP_DBG_MEM("No banks left in this segment, removing it\n");
2064 		sbdp_rm_seg(cur_seg);
2065 	}
2066 }
2067 
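/*
 * Find the segment whose base matches `base', or NULL if there is none.
 */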
2068 sbdp_seg_t *
2069 sbdp_get_seg(uint64_t base)
2070 {
2071 	sbdp_seg_t	*cur_seg;
2072 
2073 	for (cur_seg = sys_seg; cur_seg; cur_seg = cur_seg->next) {
2074 		if (cur_seg->base == base)
2075 			break;
2076 	}
2077 
2078 	return (cur_seg);
2079 }
2080 
2081 #ifdef DEBUG
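/*
 * Debug passthru: copy every 32-byte chunk of installed physical memory to
 * a single scratch physical address using bcopy32_il().
 */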
2082 int
2083 sbdp_passthru_readmem(sbdp_handle_t *hp, void *arg)
2084 {
2085 	_NOTE(ARGUNUSED(hp))
2086 	_NOTE(ARGUNUSED(arg))
2087 
2088 	struct memlist	*ml;
2089 	uint64_t	src_pa;
2090 	uint64_t	dst_pa;
2091 	uint64_t	dst;
2092 
2093 
2094 	dst_pa = va_to_pa(&dst);
2095 
2096 	memlist_read_lock();
2097 	for (ml = phys_install; ml; ml = ml->next) {
2098 		uint64_t	nbytes;
2099 
2100 		src_pa = ml->address;
2101 		nbytes = ml->size;
2102 
2103 		while (nbytes != 0ull) {
2104 
2105 			/* copy 32 bytes at src_pa to dst_pa */
2106 			bcopy32_il(src_pa, dst_pa);
2107 
2108 			/* increment by 32 bytes */
2109 			src_pa += (4 * sizeof (uint64_t));
2110 
2111 			/* decrement by 32 bytes */
2112 			nbytes -= (4 * sizeof (uint64_t));
2113 		}
2114 	}
2115 	memlist_read_unlock();
2116 
2117 	return (0);
2118 }
2119 
2120 static int
2121 isdigit(int ch)
2122 {
2123 	return (ch >= '0' && ch <= '9');
2124 }
2125 
2126 #define	isspace(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
2127 
2128 int
2129 sbdp_strtoi(char *p, char **pos)
2130 {
2131 	int n;
2132 	int c, neg = 0;
2133 
2134 	if (!isdigit(c = *p)) {
2135 		while (isspace(c))
2136 			c = *++p;
2137 		switch (c) {
2138 			case '-':
2139 				neg++;
2140 				/* FALLTHROUGH */
2141 			case '+':
2142 				c = *++p;
2143 		}
2144 		if (!isdigit(c)) {
2145 			if (pos != NULL)
2146 				*pos = p;
2147 			return (0);
2148 		}
2149 	}
2150 	for (n = '0' - c; isdigit(c = *++p); ) {
2151 		n *= 10; /* two steps to avoid unnecessary overflow */
2152 		n += '0' - c; /* accum neg to avoid surprises at MAX */
2153 	}
2154 	if (pos != NULL)
2155 		*pos = p;
2156 	return (neg ? n : -n);
2157 }
2158 
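/*
 * Debug passthru: build the copy-rename script from the handle's board to
 * the board parsed from the "prep-script=<board>" option and print it
 * without executing it.
 */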
2159 int
2160 sbdp_passthru_prep_script(sbdp_handle_t *hp, void *arg)
2161 {
2162 	int			board, i;
2163 	sbdp_bd_t		*t_bdp, *s_bdp;
2164 	char			*opts;
2165 	int			t_board;
2166 	sbdp_rename_script_t	*rsbuffer;
2167 	sbdp_cr_handle_t	*cph;
2168 	int			scriptlen, size;
2169 
2170 	opts = (char *)arg;
2171 	board = hp->h_board;
2172 
2173 	opts += strlen("prep-script=");
2174 	t_board = sbdp_strtoi(opts, NULL);
2175 
2176 	cph =  kmem_zalloc(sizeof (sbdp_cr_handle_t), KM_SLEEP);
2177 
2178 	size = sizeof (sbdp_rename_script_t) * SBDP_RENAME_MAXOP;
2179 	rsbuffer = kmem_zalloc(size, KM_SLEEP);
2180 
2181 	s_bdp = sbdp_get_bd_info(hp->h_wnode, board);
2182 	t_bdp = sbdp_get_bd_info(hp->h_wnode, t_board);
2183 
2184 	cph->s_bdp = s_bdp;
2185 	cph->t_bdp = t_bdp;
2186 	cph->script = rsbuffer;
2187 
2188 	affinity_set(CPU_CURRENT);
2189 	scriptlen = sbdp_prep_rename_script(cph);
2190 
2191 	if (scriptlen <= 0) {
2192 		cmn_err(CE_WARN,
2193 		"sbdp failed to prep for copy-rename");
2194 	}
2195 	prom_printf("SCRIPT from board %d to board %d ->\n", board, t_board);
2196 	for (i = 0;  i < (scriptlen / (sizeof (sbdp_rename_script_t))); i++) {
2197 		prom_printf("0x%lx = 0x%lx, asi 0x%x\n",
2198 		    rsbuffer[i].masr_addr, rsbuffer[i].masr, rsbuffer[i].asi);
2199 	}
2200 	prom_printf("\n");
2201 
2202 	affinity_clear();
2203 	kmem_free(rsbuffer, size);
2204 	kmem_free(cph, sizeof (sbdp_cr_handle_t));
2205 
2206 	return (0);
2207 }
2208 #endif
2209