xref: /illumos-gate/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c (revision cf8b971efe8cbaaac8c733c2466206380608c8e4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * memory management for serengeti dr memory
29  */
30 
31 #include <sys/obpdefs.h>
32 #include <sys/types.h>
33 #include <sys/conf.h>
34 #include <sys/ddi.h>
35 #include <sys/cpuvar.h>
36 #include <sys/memlist_impl.h>
37 #include <sys/machsystm.h>
38 #include <sys/promif.h>
39 #include <sys/mem_cage.h>
40 #include <sys/kmem.h>
41 #include <sys/note.h>
42 #include <sys/lgrp.h>
43 
44 #include <sys/sbd_ioctl.h>
45 #include <sys/sbd.h>
46 #include <sys/sbdp_priv.h>
47 #include <sys/sbdp_mem.h>
48 #include <sys/sun4asi.h>
49 #include <sys/cheetahregs.h>
50 #include <sys/cpu_module.h>
51 #include <sys/esunddi.h>
52 
53 #include <vm/page.h>
54 
55 static int	sbdp_get_meminfo(pnode_t, int, uint64_t *, uint64_t *);
56 int		mc_read_regs(pnode_t, mc_regs_t *);
57 uint64_t	mc_get_addr(pnode_t, int, uint_t *);
58 static pnode_t	mc_get_sibling_cpu(pnode_t nodeid);
59 static int	mc_get_sibling_cpu_impl(pnode_t nodeid);
60 static sbd_cond_t mc_check_sibling_cpu(pnode_t nodeid);
61 static void	_sbdp_copy_rename_end(void);
62 static int	sbdp_copy_rename__relocatable(sbdp_cr_handle_t *,
63 			struct memlist *, sbdp_rename_script_t *);
64 static int	sbdp_prep_rename_script(sbdp_cr_handle_t *);
65 static int	sbdp_get_lowest_addr_in_node(pnode_t, uint64_t *);
66 
67 extern void bcopy32_il(uint64_t, uint64_t);
68 extern void flush_ecache_il(uint64_t physaddr, size_t size, size_t linesize);
69 extern uint64_t lddphys_il(uint64_t physaddr);
70 extern uint64_t ldxasi_il(uint64_t physaddr, uint_t asi);
71 extern void sbdp_exec_script_il(sbdp_rename_script_t *rsp);
72 void sbdp_fill_bank_info(uint64_t, sbdp_bank_t **);
73 int sbdp_add_nodes_banks(pnode_t node, sbdp_bank_t **banks);
74 void sbdp_add_bank_to_seg(sbdp_bank_t *);
75 void sbdp_remove_bank_from_seg(sbdp_bank_t *);
76 uint64_t sbdp_determine_slice(sbdp_handle_t *);
77 sbdp_seg_t *sbdp_get_seg(uint64_t);
78 #ifdef DEBUG
79 void sbdp_print_seg(sbdp_seg_t *);
80 #endif
81 
/*
 * Head of the system-wide linked list of memory segments
 * (sbdp_seg_t), maintained by sbdp_add_bank_to_seg() and
 * sbdp_remove_bank_from_seg().
 */
sbdp_seg_t *sys_seg = NULL;
86 
87 uint64_t
88 sbdp_determine_slice(sbdp_handle_t *hp)
89 {
90 	int size;
91 
92 	size = sbdp_get_mem_size(hp);
93 
94 	if (size <= SG_SLICE_16G_SIZE) {
95 		return (SG_SLICE_16G_SIZE);
96 	} else if (size <= SG_SLICE_32G_SIZE) {
97 		return (SG_SLICE_32G_SIZE);
98 	} else {
99 		return (SG_SLICE_64G_SIZE);
100 	}
101 }
102 
/* ARGSUSED */
/*
 * Report the required memory alignment for this board, which is the
 * slice size chosen by sbdp_determine_slice().  The dip is unused.
 * Always returns 0.
 */
int
sbdp_get_mem_alignment(sbdp_handle_t *hp, dev_info_t *dip, uint64_t *align)
{
	*align = sbdp_determine_slice(hp);
	return (0);
}
110 
111 
112 void
113 sbdp_memlist_dump(struct memlist *mlist)
114 {
115 	register struct memlist *ml;
116 
117 	if (mlist == NULL) {
118 		SBDP_DBG_MEM("memlist> EMPTY\n");
119 	} else {
120 		for (ml = mlist; ml; ml = ml->next)
121 			SBDP_DBG_MEM("memlist>  0x%" PRIx64", 0x%" PRIx64"\n",
122 			    ml->address, ml->size);
123 	}
124 }
125 
/*
 * Accumulator passed to sbdp_get_mem_dip() while walking the PROM
 * tree: collects the held memory dips belonging to 'board'.
 */
struct mem_arg {
	int	board;		/* board number being searched for */
	int	ndips;		/* number of entries filled into 'list' */
	dev_info_t **list;	/* caller-supplied array of held dips */
};
131 
132 /*
133  * Returns mem dip held
134  */
135 static int
136 sbdp_get_mem_dip(pnode_t node, void *arg, uint_t flags)
137 {
138 	_NOTE(ARGUNUSED(flags))
139 
140 	dev_info_t *dip;
141 	pnode_t nodeid;
142 	mem_op_t mem = {0};
143 	struct mem_arg *ap = arg;
144 
145 	if (node == OBP_BADNODE || node == OBP_NONODE)
146 		return (DDI_FAILURE);
147 
148 	mem.nodes = &nodeid;
149 	mem.board = ap->board;
150 	mem.nmem = 0;
151 
152 	(void) sbdp_is_mem(node, &mem);
153 
154 	ASSERT(mem.nmem == 0 || mem.nmem == 1);
155 
156 	if (mem.nmem == 0 || nodeid != node)
157 		return (DDI_FAILURE);
158 
159 	dip = e_ddi_nodeid_to_dip(nodeid);
160 	if (dip) {
161 		ASSERT(ap->ndips < SBDP_MAX_MEM_NODES_PER_BOARD);
162 		ap->list[ap->ndips++] = dip;
163 	}
164 	return (DDI_SUCCESS);
165 }
166 
167 struct memlist *
168 sbdp_get_memlist(sbdp_handle_t *hp, dev_info_t *dip)
169 {
170 	_NOTE(ARGUNUSED(dip))
171 
172 	int i, j, skip = 0;
173 	dev_info_t	*list[SBDP_MAX_MEM_NODES_PER_BOARD];
174 	struct mem_arg	arg = {0};
175 	uint64_t	base_pa, size;
176 	struct memlist	*mlist = NULL;
177 
178 	list[0] = NULL;
179 	arg.board = hp->h_board;
180 	arg.list = list;
181 
182 	sbdp_walk_prom_tree(prom_rootnode(), sbdp_get_mem_dip, &arg);
183 
184 	for (i = 0; i < arg.ndips; i++) {
185 		if (list[i] == NULL)
186 			continue;
187 
188 		size = 0;
189 		for (j = 0; j < SBDP_MAX_MCS_PER_NODE; j++) {
190 			if (sbdp_get_meminfo(ddi_get_nodeid(list[i]), j,
191 			    &size, &base_pa)) {
192 				skip++;
193 				continue;
194 			}
195 			if (size == -1 || size == 0)
196 				continue;
197 
198 			(void) memlist_add_span(base_pa, size, &mlist);
199 		}
200 
201 		/*
202 		 * Release hold acquired in sbdp_get_mem_dip()
203 		 */
204 		ddi_release_devi(list[i]);
205 	}
206 
207 	/*
208 	 * XXX - The following two lines are from existing code.
209 	 * However, this appears to be incorrect - this check should be
210 	 * made for each dip in list i.e within the for(i) loop.
211 	 */
212 	if (skip == SBDP_MAX_MCS_PER_NODE)
213 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
214 
215 	SBDP_DBG_MEM("memlist for board %d\n", hp->h_board);
216 	sbdp_memlist_dump(mlist);
217 	return (mlist);
218 }
219 
220 struct memlist *
221 sbdp_memlist_dup(struct memlist *mlist)
222 {
223 	struct memlist *hl, *prev;
224 
225 	if (mlist == NULL)
226 		return (NULL);
227 
228 	prev = NULL;
229 	hl = NULL;
230 	for (; mlist; mlist = mlist->next) {
231 		struct memlist *mp;
232 
233 		mp = memlist_get_one();
234 		if (mp == NULL) {
235 			if (hl != NULL)
236 				memlist_free_list(hl);
237 			hl = NULL;
238 			break;
239 		}
240 		mp->address = mlist->address;
241 		mp->size = mlist->size;
242 		mp->next = NULL;
243 		mp->prev = prev;
244 
245 		if (prev == NULL)
246 			hl = mp;
247 		else
248 			prev->next = mp;
249 		prev = mp;
250 	}
251 
252 	return (hl);
253 }
254 
/*
 * Free a memlist built by sbdp_get_memlist() or copied by
 * sbdp_memlist_dup().  The handle is unused.  Always returns 0.
 */
int
sbdp_del_memlist(sbdp_handle_t *hp, struct memlist *mlist)
{
	_NOTE(ARGUNUSED(hp))

	memlist_free_list(mlist);

	return (0);
}
264 
/*ARGSUSED*/
/*
 * Cross-call target: flush this CPU's E-cache.  Both arguments exist
 * only to satisfy the xc_all() calling convention and are ignored.
 */
static void
sbdp_flush_ecache(uint64_t a, uint64_t b)
{
	cpu_flush_ecache();
}
271 
/*
 * Result codes returned by the relocatable copy-rename routine.
 */
typedef enum {
	SBDP_CR_OK,		/* copy-rename completed */
	SBDP_CR_MC_IDLE_ERR	/* a memory controller failed to idle */
} sbdp_cr_err_t;
276 
277 int
278 sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
279 {
280 	sbdp_bd_t	*s_bdp, *t_bdp;
281 	int		err = 0;
282 	caddr_t		mempage;
283 	ulong_t		data_area, index_area;
284 	ulong_t		e_area, e_page;
285 	int		availlen, indexlen, funclen, scriptlen;
286 	int		*indexp;
287 	time_t		copytime;
288 	int		(*funcp)();
289 	size_t		size;
290 	struct memlist	*mlist;
291 	sbdp_sr_handle_t	*srhp;
292 	sbdp_rename_script_t	*rsp;
293 	sbdp_rename_script_t	*rsbuffer;
294 	sbdp_cr_handle_t	*cph;
295 	int		linesize;
296 	uint64_t	neer;
297 	sbdp_cr_err_t	cr_err;
298 
299 	cph =  kmem_zalloc(sizeof (sbdp_cr_handle_t), KM_SLEEP);
300 
301 	SBDP_DBG_MEM("moving memory from memory board %d to board %d\n",
302 	    hp->h_board, t_bd);
303 
304 	s_bdp = sbdp_get_bd_info(hp->h_wnode, hp->h_board);
305 	t_bdp = sbdp_get_bd_info(hp->h_wnode, t_bd);
306 
307 	if ((s_bdp == NULL) || (t_bdp == NULL)) {
308 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
309 		return (-1);
310 	}
311 
312 	funclen = (int)((ulong_t)_sbdp_copy_rename_end -
313 	    (ulong_t)sbdp_copy_rename__relocatable);
314 
315 	if (funclen > PAGESIZE) {
316 		cmn_err(CE_WARN,
317 		    "sbdp: copy-rename funclen (%d) > PAGESIZE (%d)",
318 		    funclen, PAGESIZE);
319 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
320 		return (-1);
321 	}
322 
323 	/*
324 	 * mempage will be page aligned, since we're calling
325 	 * kmem_alloc() with an exact multiple of PAGESIZE.
326 	 */
327 	mempage = kmem_alloc(PAGESIZE, KM_SLEEP);
328 
329 	SBDP_DBG_MEM("mempage = 0x%p\n", mempage);
330 
331 	/*
332 	 * Copy the code for the copy-rename routine into
333 	 * a page aligned piece of memory.  We do this to guarantee
334 	 * that we're executing within the same page and thus reduce
335 	 * the possibility of cache collisions between different
336 	 * pages.
337 	 */
338 	bcopy((caddr_t)sbdp_copy_rename__relocatable, mempage, funclen);
339 
340 	funcp = (int (*)())mempage;
341 
342 	SBDP_DBG_MEM("copy-rename funcp = 0x%p (len = 0x%x)\n", funcp, funclen);
343 
344 	/*
345 	 * Prepare data page that will contain script of
346 	 * operations to perform during copy-rename.
347 	 * Allocate temporary buffer to hold script.
348 	 */
349 
350 	size = sizeof (sbdp_rename_script_t) * SBDP_RENAME_MAXOP;
351 	rsbuffer = kmem_zalloc(size, KM_SLEEP);
352 
353 	cph->s_bdp = s_bdp;
354 	cph->t_bdp = t_bdp;
355 	cph->script = rsbuffer;
356 
357 	/*
358 	 * We need to make sure we don't switch cpus since we depend on the
359 	 * correct cpu processing
360 	 */
361 	affinity_set(CPU_CURRENT);
362 	scriptlen = sbdp_prep_rename_script(cph);
363 	if (scriptlen <= 0) {
364 		cmn_err(CE_WARN, "sbdp failed to prep for copy-rename");
365 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
366 		err = 1;
367 		goto cleanup;
368 	}
369 	SBDP_DBG_MEM("copy-rename script length = 0x%x\n", scriptlen);
370 
371 	indexlen = sizeof (*indexp) << 1;
372 
373 	if ((funclen + scriptlen + indexlen) > PAGESIZE) {
374 		cmn_err(CE_WARN, "sbdp: func len (%d) + script len (%d) "
375 		    "+ index len (%d) > PAGESIZE (%d)", funclen, scriptlen,
376 		    indexlen, PAGESIZE);
377 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
378 		err = 1;
379 		goto cleanup;
380 	}
381 
382 	linesize = cpunodes[CPU->cpu_id].ecache_linesize;
383 
384 	/*
385 	 * Find aligned area within data page to maintain script.
386 	 */
387 	data_area = (ulong_t)mempage;
388 	data_area += (ulong_t)funclen + (ulong_t)(linesize - 1);
389 	data_area &= ~((ulong_t)(linesize - 1));
390 
391 	availlen = PAGESIZE - indexlen;
392 	availlen -= (int)(data_area - (ulong_t)mempage);
393 
394 	if (availlen < scriptlen) {
395 		cmn_err(CE_WARN, "sbdp: available len (%d) < script len (%d)",
396 		    availlen, scriptlen);
397 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
398 		err = 1;
399 		goto cleanup;
400 	}
401 
402 	SBDP_DBG_MEM("copy-rename script data area = 0x%lx\n",
403 	    data_area);
404 
405 	bcopy((caddr_t)rsbuffer, (caddr_t)data_area, scriptlen);
406 	rsp = (sbdp_rename_script_t *)data_area;
407 
408 	index_area = data_area + (ulong_t)scriptlen + (ulong_t)(linesize - 1);
409 	index_area &= ~((ulong_t)(linesize - 1));
410 	indexp = (int *)index_area;
411 	indexp[0] = 0;
412 	indexp[1] = 0;
413 
414 	e_area = index_area + (ulong_t)indexlen;
415 	e_page = (ulong_t)mempage + PAGESIZE;
416 	if (e_area > e_page) {
417 		cmn_err(CE_WARN,
418 		    "sbdp: index area size (%d) > available (%d)\n",
419 		    indexlen, (int)(e_page - index_area));
420 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
421 		err = 1;
422 		goto cleanup;
423 	}
424 
425 	SBDP_DBG_MEM("copy-rename index area = 0x%p\n", indexp);
426 
427 	SBDP_DBG_MEM("cpu %d\n", CPU->cpu_id);
428 
429 	srhp = sbdp_get_sr_handle();
430 	ASSERT(srhp);
431 
432 	srhp->sr_flags = hp->h_flags;
433 
434 	copytime = ddi_get_lbolt();
435 
436 	mutex_enter(&s_bdp->bd_mutex);
437 	mlist = sbdp_memlist_dup(s_bdp->ml);
438 	mutex_exit(&s_bdp->bd_mutex);
439 
440 	if (mlist == NULL) {
441 		SBDP_DBG_MEM("Didn't find memory list\n");
442 	}
443 	SBDP_DBG_MEM("src\n\tbd\t%d\n\tnode\t%d\n\tbpa 0x%lx\n\tnodes\t%p\n",
444 	    s_bdp->bd, s_bdp->wnode, s_bdp->bpa, s_bdp->nodes);
445 	sbdp_memlist_dump(s_bdp->ml);
446 	SBDP_DBG_MEM("tgt\n\tbd\t%d\n\tnode\t%d\n\tbpa 0x%lx\n\tnodes\t%p\n",
447 	    t_bdp->bd, t_bdp->wnode, t_bdp->bpa, t_bdp->nodes);
448 	sbdp_memlist_dump(t_bdp->ml);
449 
450 	/*
451 	 * Quiesce the OS.
452 	 */
453 	if (sbdp_suspend(srhp)) {
454 		sbd_error_t	*sep;
455 		cmn_err(CE_WARN, "sbdp: failed to quiesce OS for copy-rename");
456 		sep = &srhp->sep;
457 		sbdp_set_err(hp->h_err, sep->e_code, sep->e_rsc);
458 		sbdp_release_sr_handle(srhp);
459 		sbdp_del_memlist(hp, mlist);
460 		err = 1;
461 		goto cleanup;
462 	}
463 
464 	/*
465 	 * =================================
466 	 * COPY-RENAME BEGIN.
467 	 * =================================
468 	 */
469 	SBDP_DBG_MEM("s_base 0x%lx t_base 0x%lx\n", cph->s_bdp->bpa,
470 	    cph->t_bdp->bpa);
471 
472 	cph->ret = 0;
473 
474 	SBDP_DBG_MEM("cph return 0x%lx\n", cph->ret);
475 
476 	SBDP_DBG_MEM("Flushing all of the cpu caches\n");
477 	xc_all(sbdp_flush_ecache, 0, 0);
478 
479 	/* disable CE reporting */
480 	neer = get_error_enable();
481 	set_error_enable(neer & ~EN_REG_CEEN);
482 
483 	cr_err = (*funcp)(cph, mlist, rsp);
484 
485 	/* enable CE reporting */
486 	set_error_enable(neer);
487 
488 	SBDP_DBG_MEM("s_base 0x%lx t_base 0x%lx\n", cph->s_bdp->bpa,
489 	    cph->t_bdp->bpa);
490 	SBDP_DBG_MEM("cph return 0x%lx\n", cph->ret);
491 	SBDP_DBG_MEM("after execking the function\n");
492 
493 	/*
494 	 * =================================
495 	 * COPY-RENAME END.
496 	 * =================================
497 	 */
498 	SBDP_DBG_MEM("err is 0x%d\n", err);
499 
500 	/*
501 	 * Resume the OS.
502 	 */
503 	sbdp_resume(srhp);
504 	if (srhp->sep.e_code) {
505 		sbd_error_t	*sep;
506 		cmn_err(CE_WARN,
507 		    "sbdp: failed to resume OS for copy-rename");
508 		sep = &srhp->sep;
509 		sbdp_set_err(hp->h_err, sep->e_code, sep->e_rsc);
510 		err = 1;
511 	}
512 
513 	copytime = ddi_get_lbolt() - copytime;
514 
515 	sbdp_release_sr_handle(srhp);
516 	sbdp_del_memlist(hp, mlist);
517 
518 	SBDP_DBG_MEM("copy-rename elapsed time = %ld ticks (%ld secs)\n",
519 	    copytime, copytime / hz);
520 
521 	switch (cr_err) {
522 	case SBDP_CR_OK:
523 		break;
524 	case SBDP_CR_MC_IDLE_ERR: {
525 		dev_info_t *dip;
526 		pnode_t nodeid = cph->busy_mc->node;
527 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
528 
529 		dip = e_ddi_nodeid_to_dip(nodeid);
530 
531 		ASSERT(dip != NULL);
532 
533 		(void) ddi_pathname(dip, path);
534 		ddi_release_devi(dip);
535 		cmn_err(CE_WARN, "failed to idle memory controller %s: "
536 		    "copy-rename aborted", path);
537 		kmem_free(path, MAXPATHLEN);
538 		sbdp_set_err(hp->h_err, ESBD_MEMFAIL, NULL);
539 		err = 1;
540 		break;
541 	}
542 	default:
543 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
544 		cmn_err(CE_WARN, "unknown copy-rename error code (%d)", cr_err);
545 		err = 1;
546 		break;
547 	}
548 
549 	if (err)
550 		goto cleanup;
551 
552 	/*
553 	 * Rename memory for lgroup.
554 	 * Source and target board numbers are packaged in arg.
555 	 */
556 	lgrp_plat_config(LGRP_CONFIG_MEM_RENAME,
557 	    (uintptr_t)(s_bdp->bd | (t_bdp->bd << 16)));
558 
559 	/*
560 	 * swap list of banks
561 	 */
562 	sbdp_swap_list_of_banks(s_bdp, t_bdp);
563 
564 	/*
565 	 * Update the cached board info for both the source and the target
566 	 */
567 	sbdp_update_bd_info(s_bdp);
568 	sbdp_update_bd_info(t_bdp);
569 
570 	/*
571 	 * Tell the sc that we have swapped slices.
572 	 */
573 	if (sbdp_swap_slices(s_bdp->bd, t_bdp->bd) != 0) {
574 		/* This is dangerous. The in use slice could be re-used! */
575 		SBDP_DBG_MEM("swaping slices failed\n");
576 	}
577 
578 cleanup:
579 	kmem_free(rsbuffer, size);
580 	kmem_free(mempage, PAGESIZE);
581 	kmem_free(cph, sizeof (sbdp_cr_handle_t));
582 	affinity_clear();
583 
584 	return (err ? -1 : 0);
585 }
586 
/*
 * Read the MC decode registers of 'node' and append rename-script
 * entries (two per valid bank) that retarget each bank to 'new_base'.
 * If 'inval' is set, new_base is used as-is (e.g. SG_INVAL_UM to
 * invalidate); otherwise the bank's offset from 'bpa' is preserved
 * relative to new_base.  '*index' is the current script position and
 * is advanced.  Returns 0 on success, -1 if the decode registers
 * cannot be read.
 */
static int
sbdp_copy_regs(pnode_t node, uint64_t bpa, uint64_t new_base, int inval,
	sbdp_rename_script_t *rsp, int *index)
{
	int		i, m;
	mc_regs_t	regs;
	uint64_t	*mc_decode;

	if (mc_read_regs(node, &regs)) {
		SBDP_DBG_MEM("sbdp_copy_regs: failed to read source Decode "
		    "Regs");
		return (-1);
	}

	mc_decode = regs.mc_decode;

	m = *index;
	for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
		uint64_t	offset, seg_pa, tmp_base;

		/*
		 * Skip invalid banks
		 */
		if ((mc_decode[i] & SG_DECODE_VALID) != SG_DECODE_VALID) {
			continue;
		}

		tmp_base = new_base;
		if (!inval) {
			/*
			 * We need to calculate the offset from the base pa
			 * to add it appropriately to the new_base.
			 * The offset needs to be in UM relative to the mc
			 * decode register.  Since we are going from physical
			 * address to UM, we need to shift it by PHYS2UM_SHIFT.
			 * To get it ready to OR it with the MC decode reg,
			 * we need to shift it left MC_UM_SHIFT
			 */
			seg_pa = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
			offset = (seg_pa - bpa);
			/* Convert tmp_base into a physical address */
			tmp_base = (tmp_base >> MC_UM_SHIFT) << PHYS2UM_SHIFT;
			tmp_base += offset;
			/* Convert tmp_base to be MC reg ready */
			tmp_base = (tmp_base >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
		}

		/* splice the new UM field into the decode value */
		mc_decode[i] &= ~SG_DECODE_UM;
		mc_decode[i] |= tmp_base;
		mc_decode[i] |= SG_DECODE_VALID;

		/*
		 * Step 1:	Write source base address to the MC
		 *		with present bit off.
		 */
		rsp[m].masr_addr = mc_get_addr(node, i, &rsp[m].asi);
		rsp[m].masr = mc_decode[i] & ~SG_DECODE_VALID;
		m++;
		/*
		 * Step 2:	Now rewrite the mc reg with present bit on.
		 */
		rsp[m].masr_addr = rsp[m-1].masr_addr;
		rsp[m].masr = mc_decode[i];
		rsp[m].asi = rsp[m-1].asi;
		m++;
	}

	*index = m;
	return (0);
}
657 
658 static int
659 sbdp_get_reg_addr(pnode_t nodeid, uint64_t *pa)
660 {
661 	mc_regspace	reg;
662 	int		len;
663 
664 	len = prom_getproplen(nodeid, "reg");
665 	if (len != sizeof (mc_regspace))
666 		return (-1);
667 
668 	if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
669 		return (-1);
670 
671 	ASSERT(pa != NULL);
672 
673 	*pa = ((uint64_t)reg.regspec_addr_hi) << 32;
674 	*pa |= (uint64_t)reg.regspec_addr_lo;
675 
676 	return (0);
677 }
678 
679 static int
680 mc_get_sibling_cpu_impl(pnode_t mc_node)
681 {
682 	int	len, impl;
683 	pnode_t	cpu_node;
684 	char	namebuf[OBP_MAXPROPNAME];
685 
686 	cpu_node = mc_get_sibling_cpu(mc_node);
687 	if (cpu_node == OBP_NONODE) {
688 		SBDP_DBG_MEM("mc_get_sibling_cpu failed: dnode=0x%x\n",
689 		    mc_node);
690 		return (-1);
691 	}
692 
693 	len = prom_getproplen(cpu_node, "name");
694 	if (len < 0) {
695 		SBDP_DBG_MEM("invalid prom_getproplen for name prop: "
696 		    "len=%d, dnode=0x%x\n", len, cpu_node);
697 		return (-1);
698 	}
699 
700 	if (prom_getprop(cpu_node, "name", (caddr_t)namebuf) == -1) {
701 		SBDP_DBG_MEM("failed to read name property for dnode=0x%x\n",
702 		    cpu_node);
703 		return (-1);
704 	}
705 
706 	/*
707 	 * If this is a CMP node, the child has the implementation
708 	 * property.
709 	 */
710 	if (strcmp(namebuf, "cmp") == 0) {
711 		cpu_node = prom_childnode(cpu_node);
712 		ASSERT(cpu_node != OBP_NONODE);
713 	}
714 
715 	if (prom_getprop(cpu_node, "implementation#", (caddr_t)&impl) == -1) {
716 		SBDP_DBG_MEM("failed to read implementation# property for "
717 		    "dnode=0x%x\n", cpu_node);
718 		return (-1);
719 	}
720 
721 	SBDP_DBG_MEM("mc_get_sibling_cpu_impl: found impl=0x%x, dnode=0x%x\n",
722 	    impl, cpu_node);
723 
724 	return (impl);
725 }
726 
727 /*
728  * Provide EMU Activity Status register ASI and address.  Only valid for
729  * Panther processors.
730  */
731 static int
732 mc_get_idle_reg(pnode_t nodeid, uint64_t *addr, uint_t *asi)
733 {
734 	int	portid;
735 	uint64_t reg_pa;
736 
737 	ASSERT(nodeid != OBP_NONODE);
738 	ASSERT(mc_get_sibling_cpu_impl(nodeid) == PANTHER_IMPL);
739 
740 	if (prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0 ||
741 	    portid == -1) {
742 		SBDP_DBG_MEM("mc_get_idle_reg: failed to read portid prop "
743 		    "for dnode=0x%x\n", nodeid);
744 		return (-1);
745 	}
746 
747 	if (sbdp_get_reg_addr(nodeid, &reg_pa) != 0) {
748 		SBDP_DBG_MEM("mc_get_idle_reg: failed to read reg prop "
749 		    "for dnode=0x%x\n", nodeid);
750 		return (-1);
751 	}
752 
753 	/*
754 	 * Local access will be via ASI 0x4a, otherwise via Safari PIO.
755 	 * This assumes the copy-rename will later run on the same proc,
756 	 * hence there is an assumption we are already bound.
757 	 */
758 	ASSERT(curthread->t_bound_cpu == CPU);
759 	if (SG_CPUID_TO_PORTID(CPU->cpu_id) == portid) {
760 		*addr = ASI_EMU_ACT_STATUS_VA;
761 		*asi = ASI_SAFARI_CONFIG;
762 	} else {
763 		*addr = MC_ACTIVITY_STATUS(reg_pa);
764 		*asi = ASI_IO;
765 	}
766 
767 	return (0);
768 }
769 
770 /*
771  * If non-Panther board, add phys_banks entry for each physical bank.
772  * If Panther board, add mc_idle_regs entry for each EMU Activity Status
773  * register.  Increment the array indices b_idx and r_idx for each entry
774  * populated by this routine.
775  *
776  * The caller is responsible for allocating sufficient array entries.
777  */
/*
 * Populate idle-script entries for one board (see the block comment
 * above): bus-sync-list reads for Cheetah-family MCs, EMU Activity
 * Status entries for Panther MCs.  Returns 0 on success, -1 on a
 * fatal lookup/read failure.
 */
static int
sbdp_prep_mc_idle_one(sbdp_bd_t *bp, sbdp_rename_script_t phys_banks[],
    int *b_idx, sbdp_mc_idle_script_t mc_idle_regs[], int *r_idx)
{
	int		i, j;
	pnode_t		*memnodes;
	mc_regs_t	regs;
	uint64_t	addr;
	uint_t		asi;
	sbd_cond_t	sibling_cpu_cond;
	int		impl = -1;

	memnodes = bp->nodes;

	for (i = 0; i < SBDP_MAX_MEM_NODES_PER_BOARD; i++) {
		if (memnodes[i] == OBP_NONODE) {
			continue;
		}

		/* MC should not be accessed if cpu has failed  */
		sibling_cpu_cond = mc_check_sibling_cpu(memnodes[i]);
		if (sibling_cpu_cond == SBD_COND_FAILED ||
		    sibling_cpu_cond == SBD_COND_UNUSABLE) {
			SBDP_DBG_MEM("sbdp: skipping MC with failed cpu: "
			    "board=%d, mem node=%d, condition=%d",
			    bp->bd, i, sibling_cpu_cond);
			continue;
		}

		/*
		 * Initialize the board cpu type, assuming all board cpus are
		 * the same type.  This is true of all Cheetah-based processors.
		 * Failure to read the cpu type is considered a fatal error.
		 */
		if (impl == -1) {
			impl = mc_get_sibling_cpu_impl(memnodes[i]);
			if (impl == -1) {
				SBDP_DBG_MEM("sbdp: failed to get cpu impl "
				    "for MC dnode=0x%x\n", memnodes[i]);
				return (-1);
			}
		}

		switch (impl) {
		case CHEETAH_IMPL:
		case CHEETAH_PLUS_IMPL:
		case JAGUAR_IMPL:
			/* bus sync list: one read entry per valid bank */
			if (mc_read_regs(memnodes[i], &regs)) {
				SBDP_DBG_MEM("sbdp: failed to read source "
				    "Decode Regs of board %d", bp->bd);
				return (-1);
			}

			for (j = 0; j < SBDP_MAX_MCS_PER_NODE; j++) {
				uint64_t mc_decode = regs.mc_decode[j];

				if ((mc_decode & SG_DECODE_VALID) !=
				    SG_DECODE_VALID) {
					continue;
				}

				/* bank base PA from the decode register */
				addr = (MC_BASE(mc_decode) << PHYS2UM_SHIFT) |
				    (MC_LM(mc_decode) << MC_LM_SHIFT);

				phys_banks[*b_idx].masr_addr = addr;
				phys_banks[*b_idx].masr = 0;	/* unused */
				phys_banks[*b_idx].asi = ASI_MEM;
				(*b_idx)++;
			}
			break;
		case PANTHER_IMPL:
			/* Panther: poll the EMU Activity Status register */
			if (mc_get_idle_reg(memnodes[i], &addr, &asi)) {
				return (-1);
			}

			mc_idle_regs[*r_idx].addr = addr;
			mc_idle_regs[*r_idx].asi = asi;
			mc_idle_regs[*r_idx].node = memnodes[i];
			mc_idle_regs[*r_idx].bd_id = bp->bd;
			(*r_idx)++;
			break;
		default:
			cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
			    impl);
			ASSERT(0);
			return (-1);
		}
	}

	return (0);
}
869 
870 /*
871  * For non-Panther MCs that do not support read-bypass-write, we do a read
872  * to each physical bank, relying on the reads to block until all outstanding
873  * write requests have completed.  This mechanism is referred to as the bus
874  * sync list and is used for Cheetah, Cheetah+, and Jaguar processors.  The
875  * bus sync list PAs for the source and target are kept together and comprise
876  * Section 1 of the rename script.
877  *
878  * For Panther processors that support the EMU Activity Status register,
879  * we ensure the writes have completed by polling the MCU_ACT_STATUS
880  * field several times to make sure the MC queues are empty.  The
881  * EMU Activity Status register PAs for the source and target are
882  * kept together and comprise Section 2 of the rename script.
883  */
884 static int
885 sbdp_prep_mc_idle_script(sbdp_bd_t *s_bp, sbdp_bd_t *t_bp,
886     sbdp_rename_script_t *rsp, int *rsp_idx)
887 {
888 	sbdp_rename_script_t *phys_banks;
889 	sbdp_mc_idle_script_t *mc_idle_regs;
890 	int	max_banks, max_regs;
891 	size_t	bsize, msize;
892 	int	nbanks = 0, nregs = 0;
893 	int	i;
894 
895 	/* CONSTCOND */
896 	ASSERT(sizeof (sbdp_rename_script_t) ==
897 	    sizeof (sbdp_mc_idle_script_t));
898 
899 	/* allocate space for both source and target */
900 	max_banks = SBDP_MAX_MEM_NODES_PER_BOARD *
901 	    SG_MAX_BANKS_PER_MC * 2;
902 	max_regs = SBDP_MAX_MEM_NODES_PER_BOARD * 2;
903 
904 	bsize = sizeof (sbdp_rename_script_t) * max_banks;
905 	msize = sizeof (sbdp_mc_idle_script_t) * max_regs;
906 
907 	phys_banks = kmem_zalloc(bsize, KM_SLEEP);
908 	mc_idle_regs = kmem_zalloc(msize, KM_SLEEP);
909 
910 	if (sbdp_prep_mc_idle_one(t_bp, phys_banks, &nbanks,
911 	    mc_idle_regs, &nregs) != 0 ||
912 	    sbdp_prep_mc_idle_one(s_bp, phys_banks, &nbanks,
913 	    mc_idle_regs, &nregs) != 0) {
914 		kmem_free(phys_banks, bsize);
915 		kmem_free(mc_idle_regs, msize);
916 		return (-1);
917 	}
918 
919 	/* section 1 */
920 	for (i = 0; i < nbanks; i++)
921 		rsp[(*rsp_idx)++] = phys_banks[i];
922 
923 	/* section 2 */
924 	for (i = 0; i < nregs; i++)
925 		rsp[(*rsp_idx)++] = *(sbdp_rename_script_t *)&mc_idle_regs[i];
926 
927 	kmem_free(phys_banks, bsize);
928 	kmem_free(mc_idle_regs, msize);
929 
930 	return (0);
931 }
932 
933 /*
934  * code assumes single mem-unit.
935  */
/*
 * Build the complete copy-rename script in cph->script: MC-idle
 * entries, a zero terminator, then the decode-register rewrites that
 * first invalidate both boards' bases and then swap them, followed by
 * a final zero terminator.  The entry order is consumed verbatim by
 * sbdp_copy_rename__relocatable(), so it must not change.  Returns
 * the script length in bytes, or -1 on failure.
 */
static int
sbdp_prep_rename_script(sbdp_cr_handle_t *cph)
{
	pnode_t			*s_nodes, *t_nodes;
	int			m = 0, i;
	sbdp_bd_t		s_bd, t_bd, *s_bdp, *t_bdp;
	sbdp_rename_script_t	*rsp;
	uint64_t		new_base, old_base, temp_base;
	int			s_num, t_num;

	/* work on locked snapshots of both boards' info */
	mutex_enter(&cph->s_bdp->bd_mutex);
	s_bd = *cph->s_bdp;
	mutex_exit(&cph->s_bdp->bd_mutex);
	mutex_enter(&cph->t_bdp->bd_mutex);
	t_bd = *cph->t_bdp;
	mutex_exit(&cph->t_bdp->bd_mutex);

	s_bdp = &s_bd;
	t_bdp = &t_bd;
	s_nodes = s_bdp->nodes;
	t_nodes = t_bdp->nodes;
	s_num = s_bdp->nnum;
	t_num = t_bdp->nnum;
	rsp = cph->script;

	/*
	 * Calculate the new base address for the target bd
	 */

	new_base = (s_bdp->bpa >> PHYS2UM_SHIFT) << MC_UM_SHIFT;

	/*
	 * Calculate the old base address for the source bd
	 */

	old_base = (t_bdp->bpa >> PHYS2UM_SHIFT) << MC_UM_SHIFT;

	temp_base = SG_INVAL_UM;

	SBDP_DBG_MEM("new 0x%lx old_base ox%lx temp_base 0x%lx\n", new_base,
	    old_base, temp_base);

	m = 0;

	/*
	 * Ensure the MC queues have been idled on the source and target
	 * following the copy.
	 */
	if (sbdp_prep_mc_idle_script(s_bdp, t_bdp, rsp, &m) < 0)
		return (-1);

	/*
	 * Script section terminator
	 */
	rsp[m].masr_addr = 0ull;
	rsp[m].masr = 0;
	rsp[m].asi = 0;
	m++;

	/*
	 * Invalidate the base in the target mc registers
	 */
	for (i = 0; i < t_num; i++) {
		if (sbdp_copy_regs(t_nodes[i], t_bdp->bpa, temp_base, 1, rsp,
		    &m) < 0)
			return (-1);
	}
	/*
	 * Invalidate the base in the source mc registers
	 */
	for (i = 0; i < s_num; i++) {
		if (sbdp_copy_regs(s_nodes[i], s_bdp->bpa, temp_base, 1, rsp,
		    &m) < 0)
			return (-1);
	}
	/*
	 * Copy the new base into the targets mc registers
	 */
	for (i = 0; i < t_num; i++) {
		if (sbdp_copy_regs(t_nodes[i], t_bdp->bpa, new_base, 0, rsp,
		    &m) < 0)
			return (-1);
	}
	/*
	 * Copy the old base into the source mc registers
	 */
	for (i = 0; i < s_num; i++) {
		if (sbdp_copy_regs(s_nodes[i], s_bdp->bpa, old_base, 0, rsp,
		    &m) < 0)
			return (-1);
	}
	/*
	 * Zero masr_addr value indicates the END.
	 */
	rsp[m].masr_addr = 0ull;
	rsp[m].masr = 0;
	rsp[m].asi = 0;
	m++;

#ifdef DEBUG
	{
		int	i;

		SBDP_DBG_MEM("dumping copy-rename script:\n");
		for (i = 0; i < m; i++) {
			SBDP_DBG_MEM("0x%lx = 0x%lx, asi 0x%x\n",
			    rsp[i].masr_addr, rsp[i].masr, rsp[i].asi);
		}
		DELAY(1000000);
	}
#endif /* DEBUG */

	return (m * sizeof (sbdp_rename_script_t));
}
1050 
1051 /*
1052  * EMU Activity Status Register needs to be read idle several times.
1053  * See Panther PRM 12.5.
1054  */
1055 #define	SBDP_MCU_IDLE_RETRIES	10
1056 #define	SBDP_MCU_IDLE_READS	3
1057 
1058 /*
1059  * Using the "__relocatable" suffix informs DTrace providers (and anything
1060  * else, for that matter) that this function's text may be manually relocated
1061  * elsewhere before it is executed.  That is, it cannot be safely instrumented
1062  * with any methodology that is PC-relative.
1063  */
/*
 * The relocatable copy-rename body, executed from the staged page
 * while the OS is quiesced: copy every span of 'mlist' from the
 * source to the target board, flush the local E-cache, idle all MCs
 * per the script in 'rsp', then execute the decode-register rewrites.
 * Must make no memory references outside the staged page and the
 * inline (_il) leaf routines — do not restructure.  Returns
 * SBDP_CR_OK, or SBDP_CR_MC_IDLE_ERR with hp->busy_mc set.
 */
static int
sbdp_copy_rename__relocatable(sbdp_cr_handle_t *hp, struct memlist *mlist,
		register sbdp_rename_script_t *rsp)
{
	sbdp_cr_err_t	err = SBDP_CR_OK;
	size_t		csize;
	size_t		linesize;
	uint_t		size;
	uint64_t	caddr;
	uint64_t	s_base, t_base;
	sbdp_bd_t	*s_sbp, *t_sbp;
	struct memlist	*ml;
	sbdp_mc_idle_script_t *isp;
	int		i;

	caddr = ecache_flushaddr;
	/* flush 2x the E-cache size to displace every line */
	csize = (size_t)(cpunodes[CPU->cpu_id].ecache_size * 2);
	linesize = (size_t)(cpunodes[CPU->cpu_id].ecache_linesize);

	size = 0;
	s_sbp = hp->s_bdp;
	t_sbp = hp->t_bdp;

	s_base = (uint64_t)s_sbp->bpa;
	t_base = (uint64_t)t_sbp->bpa;

	hp->ret = s_base;
	/*
	 * DO COPY.
	 */
	for (ml = mlist; ml; ml = ml->next) {
		uint64_t	s_pa, t_pa;
		uint64_t	nbytes;

		s_pa = ml->address;
		/* target PA preserves the span's offset from s_base */
		t_pa = t_base + (ml->address - s_base);
		nbytes = ml->size;

		size += nbytes;
		while (nbytes != 0ull) {
			/*
			 * This copy does NOT use an ASI
			 * that avoids the Ecache, therefore
			 * the dst_pa addresses may remain
			 * in our Ecache after the dst_pa
			 * has been removed from the system.
			 * A subsequent write-back to memory
			 * will cause an ARB-stop because the
			 * physical address no longer exists
			 * in the system. Therefore we must
			 * flush out local Ecache after we
			 * finish the copy.
			 */

			/* copy 32 bytes at src_pa to dst_pa */
			bcopy32_il(s_pa, t_pa);

			/* increment by 32 bytes */
			s_pa += (4 * sizeof (uint64_t));
			t_pa += (4 * sizeof (uint64_t));

			/* decrement by 32 bytes */
			nbytes -= (4 * sizeof (uint64_t));
		}
	}

	/*
	 * Since bcopy32_il() does NOT use an ASI to bypass
	 * the Ecache, we need to flush our Ecache after
	 * the copy is complete.
	 */
	flush_ecache_il(caddr, csize, linesize);	/* inline version */

	/*
	 * Non-Panther MCs are idled by reading each physical bank.
	 */
	for (i = 0; rsp[i].asi == ASI_MEM; i++) {
		(void) lddphys_il(rsp[i].masr_addr);
	}

	/* section 2 (Panther idle registers) follows the bank reads */
	isp = (sbdp_mc_idle_script_t *)&rsp[i];

	/*
	 * Panther MCs are idled by polling until the MCU idle state
	 * is read SBDP_MCU_IDLE_READS times in succession.
	 */
	while (isp->addr != 0ull) {
		for (i = 0; i < SBDP_MCU_IDLE_RETRIES; i++) {
			register uint64_t v;
			register int n_idle = 0;


			do {
				v = ldxasi_il(isp->addr, isp->asi) &
				    MCU_ACT_STATUS;
			} while (v != MCU_ACT_STATUS &&
			    ++n_idle < SBDP_MCU_IDLE_READS);

			if (n_idle == SBDP_MCU_IDLE_READS)
				break;
		}

		if (i == SBDP_MCU_IDLE_RETRIES) {
			/* bailout */
			hp->busy_mc = isp;
			return (SBDP_CR_MC_IDLE_ERR);
		}

		isp++;
	}

	/* skip terminator */
	isp++;

	/*
	 * The following inline assembly routine caches
	 * the rename script and then caches the code that
	 * will do the rename.  This is necessary
	 * so that we don't have any memory references during
	 * the reprogramming.  We accomplish this by first
	 * jumping through the code to guarantee it's cached
	 * before we actually execute it.
	 */
	sbdp_exec_script_il((sbdp_rename_script_t *)isp);

	return (err);
}
static void
_sbdp_copy_rename_end(void)
{
	/*
	 * IMPORTANT:   This function's location MUST be located immediately
	 *		following sbdp_copy_rename__relocatable to accurately
	 *		estimate its size.  Note that this assumes (!)the
	 *		compiler keeps these functions in the order in which
	 *		they appear :-o
	 *
	 * The body is intentionally empty; only this function's address
	 * matters (it marks the end of the relocatable region above).
	 */
}
1202 int
1203 sbdp_memory_rename(sbdp_handle_t *hp)
1204 {
1205 #ifdef lint
1206 	/*
1207 	 * Delete when implemented
1208 	 */
1209 	hp = hp;
1210 #endif
1211 	return (0);
1212 }
1213 
1214 
1215 /*
1216  * In Serengeti this is a nop
1217  */
1218 int
1219 sbdp_post_configure_mem(sbdp_handle_t *hp)
1220 {
1221 #ifdef lint
1222 	hp = hp;
1223 #endif
1224 	return (0);
1225 }
1226 
1227 /*
1228  * In Serengeti this is a nop
1229  */
1230 int
1231 sbdp_post_unconfigure_mem(sbdp_handle_t *hp)
1232 {
1233 #ifdef lint
1234 	hp = hp;
1235 #endif
1236 	return (0);
1237 }
1238 
1239 /* ARGSUSED */
1240 int
1241 sbdphw_disable_memctrl(sbdp_handle_t *hp, dev_info_t *dip)
1242 {
1243 	return (0);
1244 }
1245 
1246 /* ARGSUSED */
1247 int
1248 sbdphw_enable_memctrl(sbdp_handle_t *hp, dev_info_t *dip)
1249 {
1250 	return (0);
1251 }
1252 
1253 /*
1254  * We are assuming one memory node therefore the base address is the lowest
1255  * segment possible
1256  */
1257 #define	PA_ABOVE_MAX	(0x8000000000000000ull)
1258 int
1259 sbdphw_get_base_physaddr(sbdp_handle_t *hp, dev_info_t *dip, uint64_t *pa)
1260 {
1261 	_NOTE(ARGUNUSED(hp))
1262 
1263 	int i, board = -1, wnode;
1264 	pnode_t	nodeid;
1265 	struct mem_arg arg = {0};
1266 	uint64_t seg_pa, tmp_pa;
1267 	dev_info_t *list[SBDP_MAX_MEM_NODES_PER_BOARD];
1268 	int rc;
1269 
1270 	if (dip == NULL)
1271 		return (-1);
1272 
1273 	nodeid = ddi_get_nodeid(dip);
1274 
1275 	if (sbdp_get_bd_and_wnode_num(nodeid, &board, &wnode) < 0)
1276 		return (-1);
1277 
1278 	list[0] = NULL;
1279 	arg.board = board;
1280 	arg.list = list;
1281 
1282 	(void) sbdp_walk_prom_tree(prom_rootnode(), sbdp_get_mem_dip, &arg);
1283 
1284 	if (arg.ndips <= 0)
1285 		return (-1);
1286 
1287 	seg_pa = PA_ABOVE_MAX;
1288 
1289 	rc = -1;
1290 	for (i = 0; i < arg.ndips; i++) {
1291 		if (list[i] == NULL)
1292 			continue;
1293 		if (sbdp_get_lowest_addr_in_node(ddi_get_nodeid(list[i]),
1294 		    &tmp_pa) == 0) {
1295 			rc = 0;
1296 			if (tmp_pa < seg_pa)
1297 				seg_pa = tmp_pa;
1298 		}
1299 
1300 		/*
1301 		 * Release hold acquired in sbdp_get_mem_dip()
1302 		 */
1303 		ddi_release_devi(list[i]);
1304 	}
1305 
1306 	if (rc == 0)
1307 		*pa = seg_pa;
1308 	else {
1309 		/*
1310 		 * Record the fact that an error has occurred
1311 		 */
1312 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
1313 	}
1314 
1315 	return (rc);
1316 }
1317 
1318 static int
1319 sbdp_get_lowest_addr_in_node(pnode_t node, uint64_t *pa)
1320 {
1321 	uint64_t	mc_decode, seg_pa, tmp_pa;
1322 	mc_regs_t	mc_regs, *mc_regsp = &mc_regs;
1323 	int		i, valid;
1324 	int		rc;
1325 
1326 
1327 	seg_pa = PA_ABOVE_MAX;
1328 
1329 	if (mc_read_regs(node, mc_regsp)) {
1330 		SBDP_DBG_MEM("sbdp_get_lowest_addr_in_node: failed to "
1331 		    "read source Decode Regs\n");
1332 		return (-1);
1333 	}
1334 
1335 	rc = -1;
1336 	for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
1337 		mc_decode = mc_regsp->mc_decode[i];
1338 		valid = mc_decode >> MC_VALID_SHIFT;
1339 		tmp_pa = MC_BASE(mc_decode) << PHYS2UM_SHIFT;
1340 		if (valid)
1341 			rc = 0;
1342 		if (valid && (tmp_pa < seg_pa))
1343 			seg_pa = tmp_pa;
1344 	}
1345 
1346 	if (rc == 0)
1347 		*pa = seg_pa;
1348 
1349 	return (rc);
1350 }
1351 
1352 int
1353 sbdp_is_mem(pnode_t node, void *arg)
1354 {
1355 	mem_op_t	*memp = (mem_op_t *)arg;
1356 	char		type[OBP_MAXPROPNAME];
1357 	int		bd;
1358 	pnode_t		*list;
1359 	int		board;
1360 	char		name[OBP_MAXDRVNAME];
1361 	int		len;
1362 
1363 	ASSERT(memp);
1364 
1365 	list = memp->nodes;
1366 	board = memp->board;
1367 
1368 	/*
1369 	 * Make sure that this node doesn't have its status
1370 	 * as failed
1371 	 */
1372 	if (sbdp_get_comp_status(node) != SBD_COND_OK) {
1373 		return (DDI_FAILURE);
1374 	}
1375 
1376 	len = prom_getproplen(node, "device_type");
1377 	if ((len > 0) && (len < OBP_MAXPROPNAME))
1378 		(void) prom_getprop(node, "device_type", (caddr_t)type);
1379 	else
1380 		type[0] = '\0';
1381 
1382 	if (strcmp(type, "memory-controller") == 0) {
1383 		int	wnode;
1384 
1385 		if (sbdp_get_bd_and_wnode_num(node, &bd, &wnode) < 0)
1386 			return (DDI_FAILURE);
1387 
1388 		if (bd == board) {
1389 			/*
1390 			 * Make sure we don't overwrite the array
1391 			 */
1392 			if (memp->nmem >= SBDP_MAX_MEM_NODES_PER_BOARD)
1393 				return (DDI_FAILURE);
1394 			(void) prom_getprop(node, OBP_NAME, (caddr_t)name);
1395 			SBDP_DBG_MEM("name %s  boot bd %d board %d\n", name,
1396 			    board, bd);
1397 			list[memp->nmem++] = node;
1398 			return (DDI_SUCCESS);
1399 		}
1400 	}
1401 
1402 	return (DDI_FAILURE);
1403 }
1404 
1405 static int
1406 sbdp_get_meminfo(pnode_t nodeid, int mc, uint64_t *size, uint64_t *base_pa)
1407 {
1408 	int		board, wnode;
1409 	int		valid;
1410 	mc_regs_t	mc_regs, *mc_regsp = &mc_regs;
1411 	uint64_t	mc_decode = 0;
1412 
1413 	if (sbdp_get_bd_and_wnode_num(nodeid, &board, &wnode) < 0)
1414 		return (-1);
1415 
1416 	if (mc_read_regs(nodeid, mc_regsp)) {
1417 		SBDP_DBG_MEM("sbdp_get_meminfo: failed to read source "
1418 		    "Decode Regs");
1419 		return (-1);
1420 	}
1421 	/*
1422 	 * Calculate memory size
1423 	 */
1424 	mc_decode = mc_regsp->mc_decode[mc];
1425 
1426 	/*
1427 	 * Check the valid bit to see if bank is there
1428 	 */
1429 	valid = mc_decode >> MC_VALID_SHIFT;
1430 	if (valid) {
1431 		*size = MC_UK2SPAN(mc_decode);
1432 		*base_pa = MC_BASE(mc_decode) << PHYS2UM_SHIFT;
1433 	}
1434 
1435 	return (0);
1436 }
1437 
1438 
1439 /*
1440  * Luckily for us mem nodes and cpu/CMP nodes are siblings.  All we need to
1441  * do is search in the same branch as the mem node for its sibling cpu or
1442  * CMP node.
1443  */
1444 pnode_t
1445 mc_get_sibling_cpu(pnode_t nodeid)
1446 {
1447 	int	portid;
1448 
1449 	if (prom_getprop(nodeid, OBP_PORTID, (caddr_t)&portid) < 0)
1450 		return (OBP_NONODE);
1451 
1452 	/*
1453 	 * cpus and memory are siblings so we don't need to traverse
1454 	 * the whole tree, just a branch
1455 	 */
1456 	return (sbdp_find_nearby_cpu_by_portid(nodeid, portid));
1457 }
1458 
1459 /*
1460  * Given a memory node, check it's sibling cpu or CMP to see if
1461  * access to mem will be ok. We need to search for the node and
1462  * if found get its condition.
1463  */
1464 sbd_cond_t
1465 mc_check_sibling_cpu(pnode_t nodeid)
1466 {
1467 	pnode_t	cpu_node;
1468 	sbd_cond_t	cond;
1469 	int		i;
1470 
1471 	cpu_node = mc_get_sibling_cpu(nodeid);
1472 
1473 	cond = sbdp_get_comp_status(cpu_node);
1474 
1475 	if (cond == SBD_COND_OK) {
1476 		int 		wnode;
1477 		int		bd;
1478 		int		unit;
1479 		int		portid;
1480 
1481 		if (sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) < 0)
1482 			return (SBD_COND_UNKNOWN);
1483 
1484 		(void) prom_getprop(nodeid, OBP_PORTID, (caddr_t)&portid);
1485 
1486 		/*
1487 		 * Access to the memory controller should not
1488 		 * be attempted if any of the cores are marked
1489 		 * as being in reset.
1490 		 */
1491 		for (i = 0; i < SBDP_MAX_CORES_PER_CMP; i++) {
1492 			unit = SG_PORTID_TO_CPU_UNIT(portid, i);
1493 			if (sbdp_is_cpu_present(wnode, bd, unit) &&
1494 			    sbdp_is_cpu_in_reset(wnode, bd, unit)) {
1495 				cond = SBD_COND_UNUSABLE;
1496 				break;
1497 			}
1498 		}
1499 	}
1500 
1501 	return (cond);
1502 }
1503 
1504 int
1505 mc_read_regs(pnode_t nodeid, mc_regs_t *mc_regsp)
1506 {
1507 	int			len;
1508 	uint64_t		mc_addr, mask;
1509 	mc_regspace		reg;
1510 	sbd_cond_t		sibling_cpu_cond;
1511 	int			local_mc;
1512 	int			portid;
1513 	int			i;
1514 
1515 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
1516 	    (portid == -1))
1517 		return (-1);
1518 
1519 	/*
1520 	 * mc should not be accessed if their corresponding cpu
1521 	 * has failed.
1522 	 */
1523 	sibling_cpu_cond = mc_check_sibling_cpu(nodeid);
1524 
1525 	if ((sibling_cpu_cond == SBD_COND_FAILED) ||
1526 	    (sibling_cpu_cond == SBD_COND_UNUSABLE)) {
1527 		return (-1);
1528 	}
1529 
1530 	len = prom_getproplen(nodeid, "reg");
1531 	if (len != sizeof (mc_regspace))
1532 		return (-1);
1533 
1534 	if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
1535 		return (-1);
1536 
1537 	mc_addr = ((uint64_t)reg.regspec_addr_hi) << 32;
1538 	mc_addr |= (uint64_t)reg.regspec_addr_lo;
1539 
1540 	/*
1541 	 * Make sure we don't switch cpus
1542 	 */
1543 	affinity_set(CPU_CURRENT);
1544 	if (portid == cpunodes[CPU->cpu_id].portid)
1545 		local_mc = 1;
1546 	else
1547 		local_mc = 0;
1548 
1549 	for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
1550 		mask = SG_REG_2_OFFSET(i);
1551 
1552 		/*
1553 		 * If the memory controller is local to this CPU, we use
1554 		 * the special ASI to read the decode registers.
1555 		 * Otherwise, we load the values from a magic address in
1556 		 * I/O space.
1557 		 */
1558 		if (local_mc) {
1559 			mc_regsp->mc_decode[i] = lddmcdecode(
1560 			    mask & MC_OFFSET_MASK);
1561 		} else {
1562 			mc_regsp->mc_decode[i] = lddphysio(
1563 			    (mc_addr | mask));
1564 		}
1565 	}
1566 	affinity_clear();
1567 
1568 	return (0);
1569 }
1570 
1571 uint64_t
1572 mc_get_addr(pnode_t nodeid, int mc, uint_t *asi)
1573 {
1574 	int			len;
1575 	uint64_t		mc_addr, addr;
1576 	mc_regspace		reg;
1577 	int			portid;
1578 	int			local_mc;
1579 
1580 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
1581 	    (portid == -1))
1582 		return (-1);
1583 
1584 	len = prom_getproplen(nodeid, "reg");
1585 	if (len != sizeof (mc_regspace))
1586 		return (-1);
1587 
1588 	if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
1589 		return (-1);
1590 
1591 	mc_addr = ((uint64_t)reg.regspec_addr_hi) << 32;
1592 	mc_addr |= (uint64_t)reg.regspec_addr_lo;
1593 
1594 	/*
1595 	 * Make sure we don't switch cpus
1596 	 */
1597 	affinity_set(CPU_CURRENT);
1598 	if (portid == cpunodes[CPU->cpu_id].portid)
1599 		local_mc = 1;
1600 	else
1601 		local_mc = 0;
1602 
1603 	if (local_mc) {
1604 		*asi = ASI_MC_DECODE;
1605 		addr = SG_REG_2_OFFSET(mc) & MC_OFFSET_MASK;
1606 	} else {
1607 		*asi = ASI_IO;
1608 		addr = SG_REG_2_OFFSET(mc) | mc_addr;
1609 	}
1610 	affinity_clear();
1611 
1612 	return (addr);
1613 }
1614 
1615 /* ARGSUSED */
1616 int
1617 sbdp_mem_add_span(sbdp_handle_t *hp, uint64_t address, uint64_t size)
1618 {
1619 	return (0);
1620 }
1621 
1622 int
1623 sbdp_mem_del_span(sbdp_handle_t *hp, uint64_t address, uint64_t size)
1624 {
1625 	pfn_t		 basepfn = (pfn_t)(address >> PAGESHIFT);
1626 	pgcnt_t		 npages = (pgcnt_t)(size >> PAGESHIFT);
1627 
1628 	if (size > 0) {
1629 		int rv;
1630 		rv = kcage_range_delete_post_mem_del(basepfn, npages);
1631 		if (rv != 0) {
1632 			cmn_err(CE_WARN,
1633 			    "unexpected kcage_range_delete_post_mem_del"
1634 			    " return value %d", rv);
1635 			sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
1636 			return (-1);
1637 		}
1638 	}
1639 	return (0);
1640 }
1641 
1642 /*
1643  * This routine gets the size including the
1644  * bad banks
1645  */
1646 int
1647 sbdp_get_mem_size(sbdp_handle_t *hp)
1648 {
1649 	uint64_t	size = 0;
1650 	struct memlist	*mlist, *ml;
1651 
1652 	mlist = sbdp_get_memlist(hp, (dev_info_t *)NULL);
1653 
1654 	for (ml = mlist; ml; ml = ml->next)
1655 		size += ml->size;
1656 
1657 	(void) sbdp_del_memlist(hp, mlist);
1658 
1659 	SBDP_DBG_MEM("sbdp_get_mem_size: size 0x%" PRIx64 "\n", size);
1660 
1661 	return (btop(size));
1662 }
1663 
1664 /*
1665  * This function compares the list of banks passed with the banks
1666  * in the segment
1667  */
1668 int
1669 sbdp_check_seg_with_banks(sbdp_seg_t *seg, sbdp_bank_t *banks)
1670 {
1671 	sbdp_bank_t	*cur_bank, *bank;
1672 	int		i = 0;
1673 
1674 	for (cur_bank = seg->banks; cur_bank; cur_bank = cur_bank->seg_next) {
1675 		for (bank = banks; bank; bank = bank->bd_next) {
1676 			if (!bank->valid)
1677 				continue;
1678 
1679 			if (cur_bank == bank) {
1680 				i++;
1681 			}
1682 		}
1683 	}
1684 
1685 	SBDP_DBG_MEM("banks found = %d total banks = %d\n", i, seg->nbanks);
1686 	/*
1687 	 * If we find the same num of banks that are equal, then this segment
1688 	 * is not interleaved across boards
1689 	 */
1690 	if (i == seg->nbanks)
1691 		return (0);
1692 
1693 	return (1);
1694 }
1695 
1696 
1697 /*
1698  * This routine determines if any of the memory banks on the board
1699  * participate in across board memory interleaving
1700  */
1701 int
1702 sbdp_isinterleaved(sbdp_handle_t *hp, dev_info_t *dip)
1703 {
1704 	_NOTE(ARGUNUSED(dip))
1705 
1706 	sbdp_bank_t	*bankp;
1707 	int		wnode, board;
1708 	int		is_interleave = 0;
1709 	sbdp_bd_t	*bdp;
1710 	uint64_t	base;
1711 	sbdp_seg_t	*seg;
1712 
1713 	board = hp->h_board;
1714 	wnode = hp->h_wnode;
1715 
1716 #ifdef DEBUG
1717 	sbdp_print_all_segs();
1718 #endif
1719 	/*
1720 	 * Get the banks for this board
1721 	 */
1722 	bdp = sbdp_get_bd_info(wnode, board);
1723 
1724 	if (bdp == NULL)
1725 		return (-1);
1726 
1727 	/*
1728 	 * Search for the first bank with valid memory
1729 	 */
1730 	for (bankp = bdp->banks; bankp; bankp = bankp->bd_next)
1731 		if (bankp->valid)
1732 			break;
1733 
1734 	/*
1735 	 * If there are no banks in the board, then the board is
1736 	 * not interleaved across boards
1737 	 */
1738 	if (bankp == NULL) {
1739 		return (0);
1740 	}
1741 
1742 	base = bankp->um & ~(bankp->uk);
1743 
1744 	/*
1745 	 * Find the segment for the first bank
1746 	 */
1747 	if ((seg = sbdp_get_seg(base)) == NULL) {
1748 		/*
1749 		 * Something bad has happened.
1750 		 */
1751 		return (-1);
1752 	}
1753 	/*
1754 	 * Make sure that this segment is only composed of the banks
1755 	 * in this board. If one is missing or we have an extra one
1756 	 * the board is interleaved across boards
1757 	 */
1758 	is_interleave = sbdp_check_seg_with_banks(seg, bdp->banks);
1759 
1760 	SBDP_DBG_MEM("interleave is %d\n", is_interleave);
1761 
1762 	return (is_interleave);
1763 }
1764 
1765 
1766 /*
1767  * Each node has 4 logical banks.  This routine adds all the banks (including
1768  * the invalid ones to the passed list. Note that we use the bd list and not
1769  * the seg list
1770  */
1771 int
1772 sbdp_add_nodes_banks(pnode_t node, sbdp_bank_t **banks)
1773 {
1774 	int		i;
1775 	mc_regs_t	regs;
1776 	uint64_t	*mc_decode;
1777 	sbdp_bank_t 	*bank;
1778 
1779 	if (mc_read_regs(node, &regs) == -1)
1780 		return (-1);
1781 
1782 	mc_decode = regs.mc_decode;
1783 
1784 	for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
1785 		/*
1786 		 * This creates the mem for the new member of the list
1787 		 */
1788 		sbdp_fill_bank_info(mc_decode[i], &bank);
1789 
1790 		SBDP_DBG_MEM("adding bank %d\n", bank->id);
1791 
1792 		/*
1793 		 * Insert bank into the beginning of the list
1794 		 */
1795 		bank->bd_next = *banks;
1796 		*banks = bank;
1797 
1798 		/*
1799 		 * Add this bank into its corresponding
1800 		 * segment
1801 		 */
1802 		sbdp_add_bank_to_seg(bank);
1803 	}
1804 	return (0);
1805 }
1806 
1807 /*
1808  * given the info, create a new bank node and set the info
1809  * as appropriate. We allocate the memory for the bank. It is
1810  * up to the caller to ensure the mem is freed
1811  */
1812 void
1813 sbdp_fill_bank_info(uint64_t mc_decode, sbdp_bank_t **bank)
1814 {
1815 	static int	id = 0;
1816 	sbdp_bank_t	*new;
1817 
1818 	new = kmem_zalloc(sizeof (sbdp_bank_t), KM_SLEEP);
1819 
1820 	new->id = id++;
1821 	new->valid = (mc_decode >> MC_VALID_SHIFT);
1822 	new->uk = MC_UK(mc_decode);
1823 	new->um = MC_UM(mc_decode);
1824 	new->lk = MC_LK(mc_decode);
1825 	new->lm = MC_LM(mc_decode);
1826 	new->bd_next = NULL;
1827 	new->seg_next = NULL;
1828 
1829 	*bank = new;
1830 }
1831 
1832 /*
1833  * Each bd has the potential of having mem banks on it.  The banks
1834  * may be empty or not.  This routine gets all the mem banks
1835  * for this bd
1836  */
1837 void
1838 sbdp_init_bd_banks(sbdp_bd_t *bdp)
1839 {
1840 	int		i, nmem;
1841 	pnode_t		*lists;
1842 
1843 	lists = bdp->nodes;
1844 	nmem = bdp->nnum;
1845 
1846 	if (bdp->banks != NULL) {
1847 		return;
1848 	}
1849 
1850 	bdp->banks = NULL;
1851 
1852 	for (i = 0; i < nmem; i++) {
1853 		(void) sbdp_add_nodes_banks(lists[i], &bdp->banks);
1854 	}
1855 }
1856 
1857 /*
1858  * swap the list of banks for the 2 boards
1859  */
1860 void
1861 sbdp_swap_list_of_banks(sbdp_bd_t *bdp1, sbdp_bd_t *bdp2)
1862 {
1863 	sbdp_bank_t	*tmp_ptr;
1864 
1865 	if ((bdp1 == NULL) || (bdp2 == NULL))
1866 		return;
1867 
1868 	tmp_ptr = bdp1->banks;
1869 	bdp1->banks = bdp2->banks;
1870 	bdp2->banks = tmp_ptr;
1871 }
1872 
1873 /*
1874  * free all the banks on the board.  Note that a bank node belongs
1875  * to 2 lists. The first list is the board list. The second one is
1876  * the seg list. We only need to remove the bank from both lists but only
1877  * free the node once.
1878  */
1879 void
1880 sbdp_fini_bd_banks(sbdp_bd_t *bdp)
1881 {
1882 	sbdp_bank_t	*bkp, *nbkp;
1883 
1884 	for (bkp = bdp->banks; bkp; ) {
1885 		/*
1886 		 * Remove the bank from the seg list first
1887 		 */
1888 		SBDP_DBG_MEM("Removing bank %d\n", bkp->id);
1889 		sbdp_remove_bank_from_seg(bkp);
1890 		nbkp = bkp->bd_next;
1891 		bkp->bd_next = NULL;
1892 		kmem_free(bkp, sizeof (sbdp_bank_t));
1893 
1894 		bkp = nbkp;
1895 	}
1896 	bdp->banks = NULL;
1897 }
1898 
1899 #ifdef DEBUG
1900 void
1901 sbdp_print_bd_banks(sbdp_bd_t *bdp)
1902 {
1903 	sbdp_bank_t	*bp;
1904 	int		i;
1905 
1906 	SBDP_DBG_MEM("BOARD %d\n", bdp->bd);
1907 
1908 	for (bp = bdp->banks, i = 0; bp; bp = bp->bd_next, i++) {
1909 		SBDP_DBG_MEM("BANK [%d]:\n", bp->id);
1910 		SBDP_DBG_MEM("\tvalid %d\tuk 0x%x\tum 0x%x\tlk 0x%x"
1911 		    "\tlm 0x%x\n", bp->valid, bp->uk, bp->um,
1912 		    bp->lk, bp->lm);
1913 	}
1914 }
1915 
1916 void
1917 sbdp_print_all_segs(void)
1918 {
1919 	sbdp_seg_t	*cur_seg;
1920 
1921 	for (cur_seg = sys_seg; cur_seg; cur_seg = cur_seg->next)
1922 		sbdp_print_seg(cur_seg);
1923 }
1924 
1925 void
1926 sbdp_print_seg(sbdp_seg_t *seg)
1927 {
1928 	sbdp_bank_t	*bp;
1929 	int		i;
1930 
1931 	SBDP_DBG_MEM("SEG %d\n", seg->id);
1932 
1933 	for (bp = seg->banks, i = 0; bp; bp = bp->seg_next, i++) {
1934 		SBDP_DBG_MEM("BANK [%d]:\n", bp->id);
1935 		SBDP_DBG_MEM("\tvalid %d\tuk 0x%x\tum 0x%x\tlk 0x%x"
1936 		    "\tlm 0x%x\n", bp->valid, bp->uk, bp->um,
1937 		    bp->lk, bp->lm);
1938 	}
1939 }
1940 #endif
1941 
1942 void
1943 sbdp_add_bank_to_seg(sbdp_bank_t *bank)
1944 {
1945 	uint64_t	base;
1946 	sbdp_seg_t	*cur_seg;
1947 	static int	id = 0;
1948 
1949 	/*
1950 	 * if we got an invalid bank just skip it
1951 	 */
1952 	if (bank == NULL || !bank->valid)
1953 		return;
1954 	base = bank->um & ~(bank->uk);
1955 
1956 	if ((cur_seg = sbdp_get_seg(base)) == NULL) {
1957 		/*
1958 		 * This bank is part of a new segment, so create
1959 		 * a struct for it and added to the list of segments
1960 		 */
1961 		cur_seg = kmem_zalloc(sizeof (sbdp_seg_t), KM_SLEEP);
1962 		cur_seg->id = id++;
1963 		cur_seg->base = base;
1964 		cur_seg->size = ((bank->uk +1) << PHYS2UM_SHIFT);
1965 		cur_seg->intlv = ((bank->lk ^ 0xF) + 1);
1966 		/*
1967 		 * add to the seg list
1968 		 */
1969 		cur_seg->next = sys_seg;
1970 		sys_seg = cur_seg;
1971 	}
1972 
1973 	cur_seg->nbanks++;
1974 	/*
1975 	 * add bank into segs bank list.  Note we add at the head
1976 	 */
1977 	bank->seg_next = cur_seg->banks;
1978 	cur_seg->banks = bank;
1979 }
1980 
1981 /*
1982  * Remove this segment from the seg list
1983  */
1984 void
1985 sbdp_rm_seg(sbdp_seg_t *seg)
1986 {
1987 	sbdp_seg_t	**curpp, *curp;
1988 
1989 	curpp = &sys_seg;
1990 
1991 	while ((curp = *curpp) != NULL) {
1992 		if (curp == seg) {
1993 			*curpp = curp->next;
1994 			break;
1995 		}
1996 		curpp = &curp->next;
1997 	}
1998 
1999 	if (curp != NULL) {
2000 		kmem_free(curp, sizeof (sbdp_seg_t));
2001 		curp = NULL;
2002 	}
2003 }
2004 
2005 /*
2006  * remove this bank from its seg list
2007  */
2008 void
2009 sbdp_remove_bank_from_seg(sbdp_bank_t *bank)
2010 {
2011 	uint64_t	base;
2012 	sbdp_seg_t	*cur_seg;
2013 	sbdp_bank_t	**curpp, *curp;
2014 
2015 	/*
2016 	 * if we got an invalid bank just skip it
2017 	 */
2018 	if (bank == NULL || !bank->valid)
2019 		return;
2020 	base = bank->um & ~(bank->uk);
2021 
2022 	/*
2023 	 * If the bank doesn't belong to any seg just return
2024 	 */
2025 	if ((cur_seg = sbdp_get_seg(base)) == NULL) {
2026 		SBDP_DBG_MEM("bank %d with no segment\n", bank->id);
2027 		return;
2028 	}
2029 
2030 	/*
2031 	 * Find bank in the seg
2032 	 */
2033 	curpp = &cur_seg->banks;
2034 
2035 	while ((curp = *curpp) != NULL) {
2036 		if (curp->id == bank->id) {
2037 			/*
2038 			 * found node, remove it
2039 			 */
2040 			*curpp = curp->seg_next;
2041 			break;
2042 		}
2043 		curpp = &curp->seg_next;
2044 	}
2045 
2046 	if (curp != NULL) {
2047 		cur_seg->nbanks--;
2048 	}
2049 
2050 	if (cur_seg->nbanks == 0) {
2051 		/*
2052 		 * No banks left on this segment, remove the segment
2053 		 */
2054 		SBDP_DBG_MEM("No banks left in this segment, removing it\n");
2055 		sbdp_rm_seg(cur_seg);
2056 	}
2057 }
2058 
2059 sbdp_seg_t *
2060 sbdp_get_seg(uint64_t base)
2061 {
2062 	sbdp_seg_t	*cur_seg;
2063 
2064 	for (cur_seg = sys_seg; cur_seg; cur_seg = cur_seg->next) {
2065 		if (cur_seg-> base == base)
2066 			break;
2067 	}
2068 
2069 	return (cur_seg);
2070 }
2071 
2072 #ifdef DEBUG
2073 int
2074 sbdp_passthru_readmem(sbdp_handle_t *hp, void *arg)
2075 {
2076 	_NOTE(ARGUNUSED(hp))
2077 	_NOTE(ARGUNUSED(arg))
2078 
2079 	struct memlist	*ml;
2080 	uint64_t	src_pa;
2081 	uint64_t	dst_pa;
2082 	uint64_t	dst;
2083 
2084 
2085 	dst_pa = va_to_pa(&dst);
2086 
2087 	memlist_read_lock();
2088 	for (ml = phys_install; ml; ml = ml->next) {
2089 		uint64_t	nbytes;
2090 
2091 		src_pa = ml->address;
2092 		nbytes = ml->size;
2093 
2094 		while (nbytes != 0ull) {
2095 
2096 			/* copy 32 bytes at src_pa to dst_pa */
2097 			bcopy32_il(src_pa, dst_pa);
2098 
2099 			/* increment by 32 bytes */
2100 			src_pa += (4 * sizeof (uint64_t));
2101 
2102 			/* decrement by 32 bytes */
2103 			nbytes -= (4 * sizeof (uint64_t));
2104 		}
2105 	}
2106 	memlist_read_unlock();
2107 
2108 	return (0);
2109 }
2110 
/*
 * Local ASCII digit test (kernel code can't use ctype.h).
 */
static int
isdigit(int ch)
{
	return ('0' <= ch && ch <= '9');
}
2116 
2117 #define	isspace(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
2118 
2119 int
2120 sbdp_strtoi(char *p, char **pos)
2121 {
2122 	int n;
2123 	int c, neg = 0;
2124 
2125 	if (!isdigit(c = *p)) {
2126 		while (isspace(c))
2127 			c = *++p;
2128 		switch (c) {
2129 			case '-':
2130 				neg++;
2131 				/* FALLTHROUGH */
2132 			case '+':
2133 				c = *++p;
2134 		}
2135 		if (!isdigit(c)) {
2136 			if (pos != NULL)
2137 				*pos = p;
2138 			return (0);
2139 		}
2140 	}
2141 	for (n = '0' - c; isdigit(c = *++p); ) {
2142 		n *= 10; /* two steps to avoid unnecessary overflow */
2143 		n += '0' - c; /* accum neg to avoid surprises at MAX */
2144 	}
2145 	if (pos != NULL)
2146 		*pos = p;
2147 	return (neg ? n : -n);
2148 }
2149 
2150 int
2151 sbdp_passthru_prep_script(sbdp_handle_t *hp, void *arg)
2152 {
2153 	int			board, i;
2154 	sbdp_bd_t		*t_bdp, *s_bdp;
2155 	char			*opts;
2156 	int			t_board;
2157 	sbdp_rename_script_t	*rsbuffer;
2158 	sbdp_cr_handle_t	*cph;
2159 	int			scriptlen, size;
2160 
2161 	opts = (char *)arg;
2162 	board = hp->h_board;
2163 
2164 	opts += strlen("prep-script=");
2165 	t_board = sbdp_strtoi(opts, NULL);
2166 
2167 	cph =  kmem_zalloc(sizeof (sbdp_cr_handle_t), KM_SLEEP);
2168 
2169 	size = sizeof (sbdp_rename_script_t) * SBDP_RENAME_MAXOP;
2170 	rsbuffer = kmem_zalloc(size, KM_SLEEP);
2171 
2172 	s_bdp = sbdp_get_bd_info(hp->h_wnode, board);
2173 	t_bdp = sbdp_get_bd_info(hp->h_wnode, t_board);
2174 
2175 	cph->s_bdp = s_bdp;
2176 	cph->t_bdp = t_bdp;
2177 	cph->script = rsbuffer;
2178 
2179 	affinity_set(CPU_CURRENT);
2180 	scriptlen = sbdp_prep_rename_script(cph);
2181 
2182 	if (scriptlen <= 0) {
2183 		cmn_err(CE_WARN,
2184 		"sbdp failed to prep for copy-rename");
2185 	}
2186 	prom_printf("SCRIPT from board %d to board %d ->\n", board, t_board);
2187 	for (i = 0;  i < (scriptlen / (sizeof (sbdp_rename_script_t))); i++) {
2188 		prom_printf("0x%lx = 0x%lx, asi 0x%x\n",
2189 		    rsbuffer[i].masr_addr, rsbuffer[i].masr, rsbuffer[i].asi);
2190 	}
2191 	prom_printf("\n");
2192 
2193 	affinity_clear();
2194 	kmem_free(rsbuffer, size);
2195 	kmem_free(cph, sizeof (sbdp_cr_handle_t));
2196 
2197 	return (0);
2198 }
2199 #endif
2200