1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * memory management for serengeti dr memory
29 */
30
31 #include <sys/obpdefs.h>
32 #include <sys/types.h>
33 #include <sys/conf.h>
34 #include <sys/ddi.h>
35 #include <sys/cpuvar.h>
36 #include <sys/memlist_impl.h>
37 #include <sys/machsystm.h>
38 #include <sys/promif.h>
39 #include <sys/mem_cage.h>
40 #include <sys/kmem.h>
41 #include <sys/note.h>
42 #include <sys/lgrp.h>
43
44 #include <sys/sbd_ioctl.h>
45 #include <sys/sbd.h>
46 #include <sys/sbdp_priv.h>
47 #include <sys/sbdp_mem.h>
48 #include <sys/sun4asi.h>
49 #include <sys/cheetahregs.h>
50 #include <sys/cpu_module.h>
51 #include <sys/esunddi.h>
52
53 #include <vm/page.h>
54
55 static int sbdp_get_meminfo(pnode_t, int, uint64_t *, uint64_t *);
56 int mc_read_regs(pnode_t, mc_regs_t *);
57 uint64_t mc_get_addr(pnode_t, int, uint_t *);
58 static pnode_t mc_get_sibling_cpu(pnode_t nodeid);
59 static int mc_get_sibling_cpu_impl(pnode_t nodeid);
60 static sbd_cond_t mc_check_sibling_cpu(pnode_t nodeid);
61 static void _sbdp_copy_rename_end(void);
62 static int sbdp_copy_rename__relocatable(sbdp_cr_handle_t *,
63 struct memlist *, sbdp_rename_script_t *);
64 static int sbdp_prep_rename_script(sbdp_cr_handle_t *);
65 static int sbdp_get_lowest_addr_in_node(pnode_t, uint64_t *);
66
67 extern void bcopy32_il(uint64_t, uint64_t);
68 extern void flush_ecache_il(uint64_t physaddr, size_t size, size_t linesize);
69 extern uint64_t lddphys_il(uint64_t physaddr);
70 extern uint64_t ldxasi_il(uint64_t physaddr, uint_t asi);
71 extern void sbdp_exec_script_il(sbdp_rename_script_t *rsp);
72 void sbdp_fill_bank_info(uint64_t, sbdp_bank_t **);
73 int sbdp_add_nodes_banks(pnode_t node, sbdp_bank_t **banks);
74 void sbdp_add_bank_to_seg(sbdp_bank_t *);
75 void sbdp_remove_bank_from_seg(sbdp_bank_t *);
76 uint64_t sbdp_determine_slice(sbdp_handle_t *);
77 sbdp_seg_t *sbdp_get_seg(uint64_t);
78 #ifdef DEBUG
79 void sbdp_print_seg(sbdp_seg_t *);
80 #endif
81
82 /*
83 * Head of the system segments linked list
84 */
85 sbdp_seg_t *sys_seg = NULL;
86
87 uint64_t
88 sbdp_determine_slice(sbdp_handle_t *hp)
89 {
90 int size;
91
92 size = sbdp_get_mem_size(hp);
93
94 if (size <= SG_SLICE_16G_SIZE) {
95 return (SG_SLICE_16G_SIZE);
96 } else if (size <= SG_SLICE_32G_SIZE) {
97 return (SG_SLICE_32G_SIZE);
98 } else {
99 return (SG_SLICE_64G_SIZE);
100 }
101 }
102
103 /* ARGSUSED */
104 int
105 sbdp_get_mem_alignment(sbdp_handle_t *hp, dev_info_t *dip, uint64_t *align)
106 {
107 *align = sbdp_determine_slice(hp);
108 return (0);
109 }
110
111
112 void
113 sbdp_memlist_dump(struct memlist *mlist)
114 {
115 register struct memlist *ml;
116
117 if (mlist == NULL) {
118 SBDP_DBG_MEM("memlist> EMPTY\n");
119 } else {
120 for (ml = mlist; ml; ml = ml->ml_next)
121 SBDP_DBG_MEM("memlist> 0x%" PRIx64", 0x%" PRIx64"\n",
122 ml->ml_address, ml->ml_size);
123 }
124 }
125
126 struct mem_arg {
127 int board;
128 int ndips;
129 dev_info_t **list;
130 };
131
132 /*
133 * Returns mem dip held
134 */
135 static int
136 sbdp_get_mem_dip(pnode_t node, void *arg, uint_t flags)
137 {
138 _NOTE(ARGUNUSED(flags))
139
140 dev_info_t *dip;
141 pnode_t nodeid;
142 mem_op_t mem = {0};
143 struct mem_arg *ap = arg;
144
145 if (node == OBP_BADNODE || node == OBP_NONODE)
146 return (DDI_FAILURE);
147
148 mem.nodes = &nodeid;
149 mem.board = ap->board;
150 mem.nmem = 0;
151
152 (void) sbdp_is_mem(node, &mem);
153
154 ASSERT(mem.nmem == 0 || mem.nmem == 1);
155
156 if (mem.nmem == 0 || nodeid != node)
157 return (DDI_FAILURE);
158
159 dip = e_ddi_nodeid_to_dip(nodeid);
160 if (dip) {
161 ASSERT(ap->ndips < SBDP_MAX_MEM_NODES_PER_BOARD);
162 ap->list[ap->ndips++] = dip;
163 }
164 return (DDI_SUCCESS);
165 }
166
167 struct memlist *
168 sbdp_get_memlist(sbdp_handle_t *hp, dev_info_t *dip)
169 {
170 _NOTE(ARGUNUSED(dip))
171
172 int i, j, skip = 0;
173 dev_info_t *list[SBDP_MAX_MEM_NODES_PER_BOARD];
174 struct mem_arg arg = {0};
175 uint64_t base_pa, size;
176 struct memlist *mlist = NULL;
177
178 list[0] = NULL;
179 arg.board = hp->h_board;
180 arg.list = list;
181
182 sbdp_walk_prom_tree(prom_rootnode(), sbdp_get_mem_dip, &arg);
183
184 for (i = 0; i < arg.ndips; i++) {
185 if (list[i] == NULL)
186 continue;
187
188 size = 0;
189 for (j = 0; j < SBDP_MAX_MCS_PER_NODE; j++) {
190 if (sbdp_get_meminfo(ddi_get_nodeid(list[i]), j,
191 &size, &base_pa)) {
192 skip++;
193 continue;
194 }
195 if (size == -1 || size == 0)
196 continue;
197
198 (void) memlist_add_span(base_pa, size, &mlist);
199 }
200
201 /*
202 * Release hold acquired in sbdp_get_mem_dip()
203 */
204 ddi_release_devi(list[i]);
205 }
206
207 /*
208 * XXX - The following two lines are from existing code.
209 * However, this appears to be incorrect - this check should be
210 * made for each dip in the list, i.e., within the for(i) loop.
211 */
212 if (skip == SBDP_MAX_MCS_PER_NODE)
213 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
214
215 SBDP_DBG_MEM("memlist for board %d\n", hp->h_board);
216 sbdp_memlist_dump(mlist);
217 return (mlist);
218 }
219
220 struct memlist *
221 sbdp_memlist_dup(struct memlist *mlist)
222 {
223 struct memlist *hl, *prev;
224
225 if (mlist == NULL)
226 return (NULL);
227
228 prev = NULL;
229 hl = NULL;
230 for (; mlist; mlist = mlist->ml_next) {
231 struct memlist *mp;
232
233 mp = memlist_get_one();
234 if (mp == NULL) {
235 if (hl != NULL)
236 memlist_free_list(hl);
237 hl = NULL;
238 break;
239 }
240 mp->ml_address = mlist->ml_address;
241 mp->ml_size = mlist->ml_size;
242 mp->ml_next = NULL;
243 mp->ml_prev = prev;
244
245 if (prev == NULL)
246 hl = mp;
247 else
248 prev->ml_next = mp;
249 prev = mp;
250 }
251
252 return (hl);
253 }
254
255 int
256 sbdp_del_memlist(sbdp_handle_t *hp, struct memlist *mlist)
257 {
258 _NOTE(ARGUNUSED(hp))
259
260 memlist_free_list(mlist);
261
262 return (0);
263 }
264
265 /*ARGSUSED*/
266 static void
267 sbdp_flush_ecache(uint64_t a, uint64_t b)
268 {
269 cpu_flush_ecache();
270 }
271
272 typedef enum {
273 SBDP_CR_OK,
274 SBDP_CR_MC_IDLE_ERR
275 } sbdp_cr_err_t;
276
277 int
278 sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
279 {
280 sbdp_bd_t *s_bdp, *t_bdp;
281 int err = 0;
282 caddr_t mempage;
283 ulong_t data_area, index_area;
284 ulong_t e_area, e_page;
285 int availlen, indexlen, funclen, scriptlen;
286 int *indexp;
287 time_t copytime;
288 int (*funcp)();
289 size_t size;
290 struct memlist *mlist;
291 sbdp_sr_handle_t *srhp;
292 sbdp_rename_script_t *rsp;
293 sbdp_rename_script_t *rsbuffer;
294 sbdp_cr_handle_t *cph;
295 int linesize;
296 uint64_t neer;
297 sbdp_cr_err_t cr_err;
298
299 cph = kmem_zalloc(sizeof (sbdp_cr_handle_t), KM_SLEEP);
300
301 SBDP_DBG_MEM("moving memory from memory board %d to board %d\n",
302 hp->h_board, t_bd);
303
304 s_bdp = sbdp_get_bd_info(hp->h_wnode, hp->h_board);
305 t_bdp = sbdp_get_bd_info(hp->h_wnode, t_bd);
306
307 if ((s_bdp == NULL) || (t_bdp == NULL)) {
308 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
309 return (-1);
310 }
311
312 funclen = (int)((ulong_t)_sbdp_copy_rename_end -
313 (ulong_t)sbdp_copy_rename__relocatable);
314
315 if (funclen > PAGESIZE) {
316 cmn_err(CE_WARN,
317 "sbdp: copy-rename funclen (%d) > PAGESIZE (%d)",
318 funclen, PAGESIZE);
319 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
320 return (-1);
321 }
322
323 /*
324 * mempage will be page aligned, since we're calling
325 * kmem_alloc() with an exact multiple of PAGESIZE.
326 */
327 mempage = kmem_alloc(PAGESIZE, KM_SLEEP);
328
329 SBDP_DBG_MEM("mempage = 0x%p\n", (void *)mempage);
330
331 /*
332 * Copy the code for the copy-rename routine into
333 * a page aligned piece of memory. We do this to guarantee
334 * that we're executing within the same page and thus reduce
335 * the possibility of cache collisions between different
336 * pages.
337 */
338 bcopy((caddr_t)sbdp_copy_rename__relocatable, mempage, funclen);
339
340 funcp = (int (*)())mempage;
341
342 SBDP_DBG_MEM("copy-rename funcp = 0x%p (len = 0x%x)\n", (void *)funcp,
343 funclen);
344
345 /*
346 * Prepare data page that will contain script of
347 * operations to perform during copy-rename.
348 * Allocate temporary buffer to hold script.
349 */
350
351 size = sizeof (sbdp_rename_script_t) * SBDP_RENAME_MAXOP;
352 rsbuffer = kmem_zalloc(size, KM_SLEEP);
353
354 cph->s_bdp = s_bdp;
355 cph->t_bdp = t_bdp;
356 cph->script = rsbuffer;
357
358 /*
359 * We need to make sure we don't switch cpus, since the rename script
360 * is prepared for, and must later be executed on, this cpu
361 */
362 affinity_set(CPU_CURRENT);
363 scriptlen = sbdp_prep_rename_script(cph);
364 if (scriptlen <= 0) {
365 cmn_err(CE_WARN, "sbdp failed to prep for copy-rename");
366 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
367 err = 1;
368 goto cleanup;
369 }
370 SBDP_DBG_MEM("copy-rename script length = 0x%x\n", scriptlen);
371
372 indexlen = sizeof (*indexp) << 1;
373
374 if ((funclen + scriptlen + indexlen) > PAGESIZE) {
375 cmn_err(CE_WARN, "sbdp: func len (%d) + script len (%d) "
376 "+ index len (%d) > PAGESIZE (%d)", funclen, scriptlen,
377 indexlen, PAGESIZE);
378 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
379 err = 1;
380 goto cleanup;
381 }
382
383 linesize = cpunodes[CPU->cpu_id].ecache_linesize;
384
385 /*
386 * Find aligned area within data page to maintain script.
387 */
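/*
 * A sketch of the mempage layout assembled below (offsets are
 * hypothetical, assuming an 8K page and 64-byte ecache lines):
 *
 *   mempage + 0   relocated copy-rename code (funclen bytes)
 *   ...           pad up to the next ecache linesize boundary
 *   data_area     rename script (scriptlen bytes)
 *   ...           pad up to the next ecache linesize boundary
 *   index_area    two ints used during the rename (indexlen bytes)
 *   ...           unused, up to mempage + PAGESIZE
 */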
388 data_area = (ulong_t)mempage;
389 data_area += (ulong_t)funclen + (ulong_t)(linesize - 1);
390 data_area &= ~((ulong_t)(linesize - 1));
391
392 availlen = PAGESIZE - indexlen;
393 availlen -= (int)(data_area - (ulong_t)mempage);
394
395 if (availlen < scriptlen) {
396 cmn_err(CE_WARN, "sbdp: available len (%d) < script len (%d)",
397 availlen, scriptlen);
398 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
399 err = 1;
400 goto cleanup;
401 }
402
403 SBDP_DBG_MEM("copy-rename script data area = 0x%lx\n",
404 data_area);
405
406 bcopy((caddr_t)rsbuffer, (caddr_t)data_area, scriptlen);
407 rsp = (sbdp_rename_script_t *)data_area;
408
409 index_area = data_area + (ulong_t)scriptlen + (ulong_t)(linesize - 1);
410 index_area &= ~((ulong_t)(linesize - 1));
411 indexp = (int *)index_area;
412 indexp[0] = 0;
413 indexp[1] = 0;
414
415 e_area = index_area + (ulong_t)indexlen;
416 e_page = (ulong_t)mempage + PAGESIZE;
417 if (e_area > e_page) {
418 cmn_err(CE_WARN,
419 "sbdp: index area size (%d) > available (%d)\n",
420 indexlen, (int)(e_page - index_area));
421 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
422 err = 1;
423 goto cleanup;
424 }
425
426 SBDP_DBG_MEM("copy-rename index area = 0x%p\n", (void *)indexp);
427
428 SBDP_DBG_MEM("cpu %d\n", CPU->cpu_id);
429
430 srhp = sbdp_get_sr_handle();
431 ASSERT(srhp);
432
433 srhp->sr_flags = hp->h_flags;
434
435 copytime = ddi_get_lbolt();
436
437 mutex_enter(&s_bdp->bd_mutex);
438 mlist = sbdp_memlist_dup(s_bdp->ml);
439 mutex_exit(&s_bdp->bd_mutex);
440
441 if (mlist == NULL) {
442 SBDP_DBG_MEM("Didn't find memory list\n");
443 }
444 SBDP_DBG_MEM("src\n\tbd\t%d\n\tnode\t%d\n\tbpa 0x%lx\n\tnodes\t%p\n",
445 s_bdp->bd, s_bdp->wnode, s_bdp->bpa, (void *)s_bdp->nodes);
446 sbdp_memlist_dump(s_bdp->ml);
447 SBDP_DBG_MEM("tgt\n\tbd\t%d\n\tnode\t%d\n\tbpa 0x%lx\n\tnodes\t%p\n",
448 t_bdp->bd, t_bdp->wnode, t_bdp->bpa, (void *)t_bdp->nodes);
449 sbdp_memlist_dump(t_bdp->ml);
450
451 /*
452 * Quiesce the OS.
453 */
454 if (sbdp_suspend(srhp)) {
455 sbd_error_t *sep;
456 cmn_err(CE_WARN, "sbdp: failed to quiesce OS for copy-rename");
457 sep = &srhp->sep;
458 sbdp_set_err(hp->h_err, sep->e_code, sep->e_rsc);
459 sbdp_release_sr_handle(srhp);
460 (void) sbdp_del_memlist(hp, mlist);
461 err = 1;
462 goto cleanup;
463 }
464
465 /*
466 * =================================
467 * COPY-RENAME BEGIN.
468 * =================================
469 */
470 SBDP_DBG_MEM("s_base 0x%lx t_base 0x%lx\n", cph->s_bdp->bpa,
471 cph->t_bdp->bpa);
472
473 cph->ret = 0;
474
475 SBDP_DBG_MEM("cph return 0x%lx\n", cph->ret);
476
477 SBDP_DBG_MEM("Flushing all of the cpu caches\n");
478 xc_all(sbdp_flush_ecache, 0, 0);
479
480 /* disable CE reporting */
481 neer = get_error_enable();
482 set_error_enable(neer & ~EN_REG_CEEN);
483
484 cr_err = (*funcp)(cph, mlist, rsp);
485
486 /* enable CE reporting */
487 set_error_enable(neer);
488
489 SBDP_DBG_MEM("s_base 0x%lx t_base 0x%lx\n", cph->s_bdp->bpa,
490 cph->t_bdp->bpa);
491 SBDP_DBG_MEM("cph return 0x%lx\n", cph->ret);
492 SBDP_DBG_MEM("after execking the function\n");
493
494 /*
495 * =================================
496 * COPY-RENAME END.
497 * =================================
498 */
499 SBDP_DBG_MEM("err is 0x%d\n", err);
500
501 /*
502 * Resume the OS.
503 */
504 sbdp_resume(srhp);
505 if (srhp->sep.e_code) {
506 sbd_error_t *sep;
507 cmn_err(CE_WARN,
508 "sbdp: failed to resume OS for copy-rename");
509 sep = &srhp->sep;
510 sbdp_set_err(hp->h_err, sep->e_code, sep->e_rsc);
511 err = 1;
512 }
513
514 copytime = ddi_get_lbolt() - copytime;
515
516 sbdp_release_sr_handle(srhp);
517 (void) sbdp_del_memlist(hp, mlist);
518
519 SBDP_DBG_MEM("copy-rename elapsed time = %ld ticks (%ld secs)\n",
520 copytime, copytime / hz);
521
522 switch (cr_err) {
523 case SBDP_CR_OK:
524 break;
525 case SBDP_CR_MC_IDLE_ERR: {
526 dev_info_t *dip;
527 pnode_t nodeid = cph->busy_mc->node;
528 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
529
530 dip = e_ddi_nodeid_to_dip(nodeid);
531
532 ASSERT(dip != NULL);
533
534 (void) ddi_pathname(dip, path);
535 ddi_release_devi(dip);
536 cmn_err(CE_WARN, "failed to idle memory controller %s: "
537 "copy-rename aborted", path);
538 kmem_free(path, MAXPATHLEN);
539 sbdp_set_err(hp->h_err, ESBD_MEMFAIL, NULL);
540 err = 1;
541 break;
542 }
543 default:
544 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
545 cmn_err(CE_WARN, "unknown copy-rename error code (%d)", cr_err);
546 err = 1;
547 break;
548 }
549
550 if (err)
551 goto cleanup;
552
553 /*
554 * Rename memory for lgroup.
555 * Source and target board numbers are packaged in arg.
556 */
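/*
 * For illustration only (board numbers are hypothetical): with
 * s_bdp->bd == 2 and t_bdp->bd == 5 the packed argument below is
 * 0x00050002, i.e. the source board in the low 16 bits and the
 * target board in the next 16 bits.
 */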
557 lgrp_plat_config(LGRP_CONFIG_MEM_RENAME,
558 (uintptr_t)(s_bdp->bd | (t_bdp->bd << 16)));
559
560 /*
561 * swap list of banks
562 */
563 sbdp_swap_list_of_banks(s_bdp, t_bdp);
564
565 /*
566 * Update the cached board info for both the source and the target
567 */
568 sbdp_update_bd_info(s_bdp);
569 sbdp_update_bd_info(t_bdp);
570
571 /*
572 * Tell the sc that we have swapped slices.
573 */
574 if (sbdp_swap_slices(s_bdp->bd, t_bdp->bd) != 0) {
575 /* This is dangerous. The in use slice could be re-used! */
576 SBDP_DBG_MEM("swaping slices failed\n");
577 }
578
579 cleanup:
580 kmem_free(rsbuffer, size);
581 kmem_free(mempage, PAGESIZE);
582 kmem_free(cph, sizeof (sbdp_cr_handle_t));
583 affinity_clear();
584
585 return (err ? -1 : 0);
586 }
587
588 static int
589 sbdp_copy_regs(pnode_t node, uint64_t bpa, uint64_t new_base, int inval,
590 sbdp_rename_script_t *rsp, int *index)
591 {
592 int i, m;
593 mc_regs_t regs;
594 uint64_t *mc_decode;
595
596 if (mc_read_regs(node, &regs)) {
597 SBDP_DBG_MEM("sbdp_copy_regs: failed to read source Decode "
598 "Regs");
599 return (-1);
600 }
601
602 mc_decode = regs.mc_decode;
603
604 m = *index;
605 for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
606 uint64_t offset, seg_pa, tmp_base;
607
608 /*
609 * Skip invalid banks
610 */
611 if ((mc_decode[i] & SG_DECODE_VALID) != SG_DECODE_VALID) {
612 continue;
613 }
614
615 tmp_base = new_base;
616 if (!inval) {
617 /*
618 * We need to calculate the offset from the base pa
619 * to add it appropriately to the new_base.
620 * The offset needs to be in UM relative to the mc
621 * decode register. Since we are going from physical
622 * address to UM, we need to shift it by PHYS2UM_SHIFT.
623 * To get it ready to OR it with the MC decode reg,
624 * we need to shift it left MC_UM_SHIFT
625 */
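/*
 * A sketch with hypothetical values: if this bank's segment starts
 * 0x10000000 bytes above the source board's base (seg_pa - bpa ==
 * 0x10000000), that same offset is added to the physical form of
 * new_base below, and the sum is converted back to MC decode (UM)
 * form by shifting right PHYS2UM_SHIFT and left MC_UM_SHIFT.
 */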
626 seg_pa = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
627 offset = (seg_pa - bpa);
628 /* Convert tmp_base into a physical address */
629 tmp_base = (tmp_base >> MC_UM_SHIFT) << PHYS2UM_SHIFT;
630 tmp_base += offset;
631 /* Convert tmp_base to be MC reg ready */
632 tmp_base = (tmp_base >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
633 }
634
635 mc_decode[i] &= ~SG_DECODE_UM;
636 mc_decode[i] |= tmp_base;
637 mc_decode[i] |= SG_DECODE_VALID;
638
639 /*
640 * Step 1: Write source base address to the MC
641 * with present bit off.
642 */
643 rsp[m].masr_addr = mc_get_addr(node, i, &rsp[m].asi);
644 rsp[m].masr = mc_decode[i] & ~SG_DECODE_VALID;
645 m++;
646 /*
647 * Step 2: Now rewrite the mc reg with present bit on.
648 */
649 rsp[m].masr_addr = rsp[m-1].masr_addr;
650 rsp[m].masr = mc_decode[i];
651 rsp[m].asi = rsp[m-1].asi;
652 m++;
653 }
654
655 *index = m;
656 return (0);
657 }
658
659 static int
660 sbdp_get_reg_addr(pnode_t nodeid, uint64_t *pa)
661 {
662 mc_regspace reg;
663 int len;
664
665 len = prom_getproplen(nodeid, "reg");
666 if (len != sizeof (mc_regspace))
667 return (-1);
668
669 if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
670 return (-1);
671
672 ASSERT(pa != NULL);
673
674 *pa = ((uint64_t)reg.regspec_addr_hi) << 32;
675 *pa |= (uint64_t)reg.regspec_addr_lo;
676
677 return (0);
678 }
679
680 static int
681 mc_get_sibling_cpu_impl(pnode_t mc_node)
682 {
683 int len, impl;
684 pnode_t cpu_node;
685 char namebuf[OBP_MAXPROPNAME];
686
687 cpu_node = mc_get_sibling_cpu(mc_node);
688 if (cpu_node == OBP_NONODE) {
689 SBDP_DBG_MEM("mc_get_sibling_cpu failed: dnode=0x%x\n",
690 mc_node);
691 return (-1);
692 }
693
694 len = prom_getproplen(cpu_node, "name");
695 if (len < 0) {
696 SBDP_DBG_MEM("invalid prom_getproplen for name prop: "
697 "len=%d, dnode=0x%x\n", len, cpu_node);
698 return (-1);
699 }
700
701 if (prom_getprop(cpu_node, "name", (caddr_t)namebuf) == -1) {
702 SBDP_DBG_MEM("failed to read name property for dnode=0x%x\n",
703 cpu_node);
704 return (-1);
705 }
706
707 /*
708 * If this is a CMP node, the child has the implementation
709 * property.
710 */
711 if (strcmp(namebuf, "cmp") == 0) {
712 cpu_node = prom_childnode(cpu_node);
713 ASSERT(cpu_node != OBP_NONODE);
714 }
715
716 if (prom_getprop(cpu_node, "implementation#", (caddr_t)&impl) == -1) {
717 SBDP_DBG_MEM("failed to read implementation# property for "
718 "dnode=0x%x\n", cpu_node);
719 return (-1);
720 }
721
722 SBDP_DBG_MEM("mc_get_sibling_cpu_impl: found impl=0x%x, dnode=0x%x\n",
723 impl, cpu_node);
724
725 return (impl);
726 }
727
728 /*
729 * Provide EMU Activity Status register ASI and address. Only valid for
730 * Panther processors.
731 */
732 static int
733 mc_get_idle_reg(pnode_t nodeid, uint64_t *addr, uint_t *asi)
734 {
735 int portid;
736 uint64_t reg_pa;
737
738 ASSERT(nodeid != OBP_NONODE);
739 ASSERT(mc_get_sibling_cpu_impl(nodeid) == PANTHER_IMPL);
740
741 if (prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0 ||
742 portid == -1) {
743 SBDP_DBG_MEM("mc_get_idle_reg: failed to read portid prop "
744 "for dnode=0x%x\n", nodeid);
745 return (-1);
746 }
747
748 if (sbdp_get_reg_addr(nodeid, &reg_pa) != 0) {
749 SBDP_DBG_MEM("mc_get_idle_reg: failed to read reg prop "
750 "for dnode=0x%x\n", nodeid);
751 return (-1);
752 }
753
754 /*
755 * Local access will be via ASI 0x4a, otherwise via Safari PIO.
756 * This assumes the copy-rename will later run on the same proc,
757 * hence there is an assumption we are already bound.
758 */
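/*
 * For illustration, in the relocatable copy-rename routine the idle
 * poll then becomes either ldxasi_il(ASI_EMU_ACT_STATUS_VA,
 * ASI_SAFARI_CONFIG) for the local case or
 * ldxasi_il(MC_ACTIVITY_STATUS(reg_pa), ASI_IO) for the remote case.
 */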
759 ASSERT(curthread->t_bound_cpu == CPU);
760 if (SG_CPUID_TO_PORTID(CPU->cpu_id) == portid) {
761 *addr = ASI_EMU_ACT_STATUS_VA;
762 *asi = ASI_SAFARI_CONFIG;
763 } else {
764 *addr = MC_ACTIVITY_STATUS(reg_pa);
765 *asi = ASI_IO;
766 }
767
768 return (0);
769 }
770
771 /*
772 * If non-Panther board, add phys_banks entry for each physical bank.
773 * If Panther board, add mc_idle_regs entry for each EMU Activity Status
774 * register. Increment the array indices b_idx and r_idx for each entry
775 * populated by this routine.
776 *
777 * The caller is responsible for allocating sufficient array entries.
778 */
779 static int
780 sbdp_prep_mc_idle_one(sbdp_bd_t *bp, sbdp_rename_script_t phys_banks[],
781 int *b_idx, sbdp_mc_idle_script_t mc_idle_regs[], int *r_idx)
782 {
783 int i, j;
784 pnode_t *memnodes;
785 mc_regs_t regs;
786 uint64_t addr;
787 uint_t asi;
788 sbd_cond_t sibling_cpu_cond;
789 int impl = -1;
790
791 memnodes = bp->nodes;
792
793 for (i = 0; i < SBDP_MAX_MEM_NODES_PER_BOARD; i++) {
794 if (memnodes[i] == OBP_NONODE) {
795 continue;
796 }
797
798 /* MC should not be accessed if cpu has failed */
799 sibling_cpu_cond = mc_check_sibling_cpu(memnodes[i]);
800 if (sibling_cpu_cond == SBD_COND_FAILED ||
801 sibling_cpu_cond == SBD_COND_UNUSABLE) {
802 SBDP_DBG_MEM("sbdp: skipping MC with failed cpu: "
803 "board=%d, mem node=%d, condition=%d",
804 bp->bd, i, sibling_cpu_cond);
805 continue;
806 }
807
808 /*
809 * Initialize the board cpu type, assuming all board cpus are
810 * the same type. This is true of all Cheetah-based processors.
811 * Failure to read the cpu type is considered a fatal error.
812 */
813 if (impl == -1) {
814 impl = mc_get_sibling_cpu_impl(memnodes[i]);
815 if (impl == -1) {
816 SBDP_DBG_MEM("sbdp: failed to get cpu impl "
817 "for MC dnode=0x%x\n", memnodes[i]);
818 return (-1);
819 }
820 }
821
822 switch (impl) {
823 case CHEETAH_IMPL:
824 case CHEETAH_PLUS_IMPL:
825 case JAGUAR_IMPL:
826 if (mc_read_regs(memnodes[i], &regs)) {
827 SBDP_DBG_MEM("sbdp: failed to read source "
828 "Decode Regs of board %d", bp->bd);
829 return (-1);
830 }
831
832 for (j = 0; j < SBDP_MAX_MCS_PER_NODE; j++) {
833 uint64_t mc_decode = regs.mc_decode[j];
834
835 if ((mc_decode & SG_DECODE_VALID) !=
836 SG_DECODE_VALID) {
837 continue;
838 }
839
840 addr = (MC_BASE(mc_decode) << PHYS2UM_SHIFT) |
841 (MC_LM(mc_decode) << MC_LM_SHIFT);
842
843 phys_banks[*b_idx].masr_addr = addr;
844 phys_banks[*b_idx].masr = 0; /* unused */
845 phys_banks[*b_idx].asi = ASI_MEM;
846 (*b_idx)++;
847 }
848 break;
849 case PANTHER_IMPL:
850 if (mc_get_idle_reg(memnodes[i], &addr, &asi)) {
851 return (-1);
852 }
853
854 mc_idle_regs[*r_idx].addr = addr;
855 mc_idle_regs[*r_idx].asi = asi;
856 mc_idle_regs[*r_idx].node = memnodes[i];
857 mc_idle_regs[*r_idx].bd_id = bp->bd;
858 (*r_idx)++;
859 break;
860 default:
861 cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
862 impl);
863 ASSERT(0);
864 return (-1);
865 }
866 }
867
868 return (0);
869 }
870
871 /*
872 * For non-Panther MCs that do not support read-bypass-write, we do a read
873 * to each physical bank, relying on the reads to block until all outstanding
874 * write requests have completed. This mechanism is referred to as the bus
875 * sync list and is used for Cheetah, Cheetah+, and Jaguar processors. The
876 * bus sync list PAs for the source and target are kept together and comprise
877 * Section 1 of the rename script.
878 *
879 * For Panther processors that support the EMU Activity Status register,
880 * we ensure the writes have completed by polling the MCU_ACT_STATUS
881 * field several times to make sure the MC queues are empty. The
882 * EMU Activity Status register PAs for the source and target are
883 * kept together and comprise Section 2 of the rename script.
884 */
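/*
 * A sketch of the entries this routine appends to the rename script
 * (counts are illustrative and depend on the boards involved):
 *
 *   Section 1: { masr_addr = bank PA, asi = ASI_MEM }, one entry per
 *              valid non-Panther bank, target board first, then source
 *   Section 2: { addr, asi, node, bd_id }, one entry per Panther MC,
 *              target board first, then source
 *
 * The caller appends a zero terminator after these sections.
 */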
885 static int
886 sbdp_prep_mc_idle_script(sbdp_bd_t *s_bp, sbdp_bd_t *t_bp,
887 sbdp_rename_script_t *rsp, int *rsp_idx)
888 {
889 sbdp_rename_script_t *phys_banks;
890 sbdp_mc_idle_script_t *mc_idle_regs;
891 int max_banks, max_regs;
892 size_t bsize, msize;
893 int nbanks = 0, nregs = 0;
894 int i;
895
896 /* CONSTCOND */
897 ASSERT(sizeof (sbdp_rename_script_t) ==
898 sizeof (sbdp_mc_idle_script_t));
899
900 /* allocate space for both source and target */
901 max_banks = SBDP_MAX_MEM_NODES_PER_BOARD *
902 SG_MAX_BANKS_PER_MC * 2;
903 max_regs = SBDP_MAX_MEM_NODES_PER_BOARD * 2;
904
905 bsize = sizeof (sbdp_rename_script_t) * max_banks;
906 msize = sizeof (sbdp_mc_idle_script_t) * max_regs;
907
908 phys_banks = kmem_zalloc(bsize, KM_SLEEP);
909 mc_idle_regs = kmem_zalloc(msize, KM_SLEEP);
910
911 if (sbdp_prep_mc_idle_one(t_bp, phys_banks, &nbanks,
912 mc_idle_regs, &nregs) != 0 ||
913 sbdp_prep_mc_idle_one(s_bp, phys_banks, &nbanks,
914 mc_idle_regs, &nregs) != 0) {
915 kmem_free(phys_banks, bsize);
916 kmem_free(mc_idle_regs, msize);
917 return (-1);
918 }
919
920 /* section 1 */
921 for (i = 0; i < nbanks; i++)
922 rsp[(*rsp_idx)++] = phys_banks[i];
923
924 /* section 2 */
925 for (i = 0; i < nregs; i++)
926 rsp[(*rsp_idx)++] = *(sbdp_rename_script_t *)&mc_idle_regs[i];
927
928 kmem_free(phys_banks, bsize);
929 kmem_free(mc_idle_regs, msize);
930
931 return (0);
932 }
933
934 /*
935 * This code assumes a single mem-unit.
936 */
937 static int
938 sbdp_prep_rename_script(sbdp_cr_handle_t *cph)
939 {
940 pnode_t *s_nodes, *t_nodes;
941 int m = 0, i;
942 sbdp_bd_t s_bd, t_bd, *s_bdp, *t_bdp;
943 sbdp_rename_script_t *rsp;
944 uint64_t new_base, old_base, temp_base;
945 int s_num, t_num;
946
947 mutex_enter(&cph->s_bdp->bd_mutex);
948 s_bd = *cph->s_bdp;
949 mutex_exit(&cph->s_bdp->bd_mutex);
950 mutex_enter(&cph->t_bdp->bd_mutex);
951 t_bd = *cph->t_bdp;
952 mutex_exit(&cph->t_bdp->bd_mutex);
953
954 s_bdp = &s_bd;
955 t_bdp = &t_bd;
956 s_nodes = s_bdp->nodes;
957 t_nodes = t_bdp->nodes;
958 s_num = s_bdp->nnum;
959 t_num = t_bdp->nnum;
960 rsp = cph->script;
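/*
 * A sketch of the overall script layout built below (not a literal
 * dump):
 *
 *   MC idle entries for target then source (sbdp_prep_mc_idle_script)
 *   zero terminator
 *   target MC decode regs rewritten with SG_INVAL_UM (invalidated)
 *   source MC decode regs rewritten with SG_INVAL_UM (invalidated)
 *   target MC decode regs rewritten with new_base (the source base)
 *   source MC decode regs rewritten with old_base (the target base)
 *   zero terminator
 *
 * Each sbdp_copy_regs() call emits two entries per valid bank: one
 * write with the valid bit off, then one with it on.
 */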
961
962 /*
963 * Calculate the new base address for the target bd
964 */
965
966 new_base = (s_bdp->bpa >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
967
968 /*
969 * Calculate the old base address for the source bd
970 */
971
972 old_base = (t_bdp->bpa >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
973
974 temp_base = SG_INVAL_UM;
975
976 SBDP_DBG_MEM("new 0x%lx old_base ox%lx temp_base 0x%lx\n", new_base,
977 old_base, temp_base);
978
979 m = 0;
980
981 /*
982 * Ensure the MC queues have been idled on the source and target
983 * following the copy.
984 */
985 if (sbdp_prep_mc_idle_script(s_bdp, t_bdp, rsp, &m) < 0)
986 return (-1);
987
988 /*
989 * Script section terminator
990 */
991 rsp[m].masr_addr = 0ull;
992 rsp[m].masr = 0;
993 rsp[m].asi = 0;
994 m++;
995
996 /*
997 * Invalidate the base in the target mc registers
998 */
999 for (i = 0; i < t_num; i++) {
1000 if (sbdp_copy_regs(t_nodes[i], t_bdp->bpa, temp_base, 1, rsp,
1001 &m) < 0)
1002 return (-1);
1003 }
1004 /*
1005 * Invalidate the base in the source mc registers
1006 */
1007 for (i = 0; i < s_num; i++) {
1008 if (sbdp_copy_regs(s_nodes[i], s_bdp->bpa, temp_base, 1, rsp,
1009 &m) < 0)
1010 return (-1);
1011 }
1012 /*
1013 * Copy the new base into the targets mc registers
1014 */
1015 for (i = 0; i < t_num; i++) {
1016 if (sbdp_copy_regs(t_nodes[i], t_bdp->bpa, new_base, 0, rsp,
1017 &m) < 0)
1018 return (-1);
1019 }
1020 /*
1021 * Copy the old base into the source mc registers
1022 */
1023 for (i = 0; i < s_num; i++) {
1024 if (sbdp_copy_regs(s_nodes[i], s_bdp->bpa, old_base, 0, rsp,
1025 &m) < 0)
1026 return (-1);
1027 }
1028 /*
1029 * Zero masr_addr value indicates the END.
1030 */
1031 rsp[m].masr_addr = 0ull;
1032 rsp[m].masr = 0;
1033 rsp[m].asi = 0;
1034 m++;
1035
1036 #ifdef DEBUG
1037 {
1038 int i;
1039
1040 SBDP_DBG_MEM("dumping copy-rename script:\n");
1041 for (i = 0; i < m; i++) {
1042 SBDP_DBG_MEM("0x%lx = 0x%lx, asi 0x%x\n",
1043 rsp[i].masr_addr, rsp[i].masr, rsp[i].asi);
1044 }
1045 DELAY(1000000);
1046 }
1047 #endif /* DEBUG */
1048
1049 return (m * sizeof (sbdp_rename_script_t));
1050 }
1051
1052 /*
1053 * EMU Activity Status Register needs to be read idle several times.
1054 * See Panther PRM 12.5.
1055 */
1056 #define SBDP_MCU_IDLE_RETRIES 10
1057 #define SBDP_MCU_IDLE_READS 3
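/*
 * A sketch of the idle test applied in sbdp_copy_rename__relocatable():
 * an MC is treated as idle once (ldxasi_il(addr, asi) & MCU_ACT_STATUS)
 * equals MCU_ACT_STATUS on SBDP_MCU_IDLE_READS consecutive reads, and
 * up to SBDP_MCU_IDLE_RETRIES attempts are made before bailing out with
 * SBDP_CR_MC_IDLE_ERR.
 */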
1058
1059 /*
1060 * Using the "__relocatable" suffix informs DTrace providers (and anything
1061 * else, for that matter) that this function's text may be manually relocated
1062 * elsewhere before it is executed. That is, it cannot be safely instrumented
1063 * with any methodology that is PC-relative.
1064 */
1065 static int
1066 sbdp_copy_rename__relocatable(sbdp_cr_handle_t *hp, struct memlist *mlist,
1067 register sbdp_rename_script_t *rsp)
1068 {
1069 sbdp_cr_err_t err = SBDP_CR_OK;
1070 size_t csize;
1071 size_t linesize;
1072 uint_t size;
1073 uint64_t caddr;
1074 uint64_t s_base, t_base;
1075 sbdp_bd_t *s_sbp, *t_sbp;
1076 struct memlist *ml;
1077 sbdp_mc_idle_script_t *isp;
1078 int i;
1079
1080 caddr = ecache_flushaddr;
1081 csize = (size_t)(cpunodes[CPU->cpu_id].ecache_size * 2);
1082 linesize = (size_t)(cpunodes[CPU->cpu_id].ecache_linesize);
1083
1084 size = 0;
1085 s_sbp = hp->s_bdp;
1086 t_sbp = hp->t_bdp;
1087
1088 s_base = (uint64_t)s_sbp->bpa;
1089 t_base = (uint64_t)t_sbp->bpa;
1090
1091 hp->ret = s_base;
1092 /*
1093 * DO COPY.
1094 */
1095 for (ml = mlist; ml; ml = ml->ml_next) {
1096 uint64_t s_pa, t_pa;
1097 uint64_t nbytes;
1098
1099 s_pa = ml->ml_address;
1100 t_pa = t_base + (ml->ml_address - s_base);
1101 nbytes = ml->ml_size;
1102
1103 size += nbytes;
1104 while (nbytes != 0ull) {
1105 /*
1106 * This copy does NOT use an ASI
1107 * that avoids the Ecache, therefore
1108 * the dst_pa addresses may remain
1109 * in our Ecache after the dst_pa
1110 * has been removed from the system.
1111 * A subsequent write-back to memory
1112 * will cause an ARB-stop because the
1113 * physical address no longer exists
1114 * in the system. Therefore we must
1115 * flush out local Ecache after we
1116 * finish the copy.
1117 */
1118
1119 /* copy 32 bytes at src_pa to dst_pa */
1120 bcopy32_il(s_pa, t_pa);
1121
1122 /* increment by 32 bytes */
1123 s_pa += (4 * sizeof (uint64_t));
1124 t_pa += (4 * sizeof (uint64_t));
1125
1126 /* decrement by 32 bytes */
1127 nbytes -= (4 * sizeof (uint64_t));
1128 }
1129 }
1130
1131 /*
1132 * Since bcopy32_il() does NOT use an ASI to bypass
1133 * the Ecache, we need to flush our Ecache after
1134 * the copy is complete.
1135 */
1136 flush_ecache_il(caddr, csize, linesize); /* inline version */
1137
1138 /*
1139 * Non-Panther MCs are idled by reading each physical bank.
1140 */
1141 for (i = 0; rsp[i].asi == ASI_MEM; i++) {
1142 (void) lddphys_il(rsp[i].masr_addr);
1143 }
1144
1145 isp = (sbdp_mc_idle_script_t *)&rsp[i];
1146
1147 /*
1148 * Panther MCs are idled by polling until the MCU idle state
1149 * is read SBDP_MCU_IDLE_READS times in succession.
1150 */
1151 while (isp->addr != 0ull) {
1152 for (i = 0; i < SBDP_MCU_IDLE_RETRIES; i++) {
1153 register uint64_t v;
1154 register int n_idle = 0;
1155
1156
1157 do {
1158 v = ldxasi_il(isp->addr, isp->asi) &
1159 MCU_ACT_STATUS;
1160 } while (v != MCU_ACT_STATUS &&
1161 ++n_idle < SBDP_MCU_IDLE_READS);
1162
1163 if (n_idle == SBDP_MCU_IDLE_READS)
1164 break;
1165 }
1166
1167 if (i == SBDP_MCU_IDLE_RETRIES) {
1168 /* bailout */
1169 hp->busy_mc = isp;
1170 return (SBDP_CR_MC_IDLE_ERR);
1171 }
1172
1173 isp++;
1174 }
1175
1176 /* skip terminator */
1177 isp++;
1178
1179 /*
1180 * The following inline assembly routine caches
1181 * the rename script and then caches the code that
1182 * will do the rename. This is necessary
1183 * so that we don't have any memory references during
1184 * the reprogramming. We accomplish this by first
1185 * jumping through the code to guarantee it's cached
1186 * before we actually execute it.
1187 */
1188 sbdp_exec_script_il((sbdp_rename_script_t *)isp);
1189
1190 return (err);
1191 }
1192 static void
1193 _sbdp_copy_rename_end(void)
1194 {
1195 /*
1196 * IMPORTANT: This function's location MUST be located immediately
1197 * following sbdp_copy_rename__relocatable to accurately
1198 * estimate its size. Note that this assumes (!)the
1199 * compiler keeps these functions in the order in which
1200 * they appear :-o
1201 */
1202 }
1203 int
1204 sbdp_memory_rename(sbdp_handle_t *hp)
1205 {
1206 #ifdef lint
1207 /*
1208 * Delete when implemented
1209 */
1210 hp = hp;
1211 #endif
1212 return (0);
1213 }
1214
1215
1216 /*
1217 * In Serengeti this is a nop
1218 */
1219 int
1220 sbdp_post_configure_mem(sbdp_handle_t *hp)
1221 {
1222 #ifdef lint
1223 hp = hp;
1224 #endif
1225 return (0);
1226 }
1227
1228 /*
1229 * In Serengeti this is a nop
1230 */
1231 int
1232 sbdp_post_unconfigure_mem(sbdp_handle_t *hp)
1233 {
1234 #ifdef lint
1235 hp = hp;
1236 #endif
1237 return (0);
1238 }
1239
1240 /* ARGSUSED */
1241 int
1242 sbdphw_disable_memctrl(sbdp_handle_t *hp, dev_info_t *dip)
1243 {
1244 return (0);
1245 }
1246
1247 /* ARGSUSED */
1248 int
1249 sbdphw_enable_memctrl(sbdp_handle_t *hp, dev_info_t *dip)
1250 {
1251 return (0);
1252 }
1253
1254 /*
1255 * We are assuming one memory node, therefore the base address is the
1256 * lowest possible segment address
1257 */
1258 #define PA_ABOVE_MAX (0x8000000000000000ull)
1259 int
1260 sbdphw_get_base_physaddr(sbdp_handle_t *hp, dev_info_t *dip, uint64_t *pa)
1261 {
1262 _NOTE(ARGUNUSED(hp))
1263
1264 int i, board = -1, wnode;
1265 pnode_t nodeid;
1266 struct mem_arg arg = {0};
1267 uint64_t seg_pa, tmp_pa;
1268 dev_info_t *list[SBDP_MAX_MEM_NODES_PER_BOARD];
1269 int rc;
1270
1271 if (dip == NULL)
1272 return (-1);
1273
1274 nodeid = ddi_get_nodeid(dip);
1275
1276 if (sbdp_get_bd_and_wnode_num(nodeid, &board, &wnode) < 0)
1277 return (-1);
1278
1279 list[0] = NULL;
1280 arg.board = board;
1281 arg.list = list;
1282
1283 (void) sbdp_walk_prom_tree(prom_rootnode(), sbdp_get_mem_dip, &arg);
1284
1285 if (arg.ndips <= 0)
1286 return (-1);
1287
1288 seg_pa = PA_ABOVE_MAX;
1289
1290 rc = -1;
1291 for (i = 0; i < arg.ndips; i++) {
1292 if (list[i] == NULL)
1293 continue;
1294 if (sbdp_get_lowest_addr_in_node(ddi_get_nodeid(list[i]),
1295 &tmp_pa) == 0) {
1296 rc = 0;
1297 if (tmp_pa < seg_pa)
1298 seg_pa = tmp_pa;
1299 }
1300
1301 /*
1302 * Release hold acquired in sbdp_get_mem_dip()
1303 */
1304 ddi_release_devi(list[i]);
1305 }
1306
1307 if (rc == 0)
1308 *pa = seg_pa;
1309 else {
1310 /*
1311 * Record the fact that an error has occurred
1312 */
1313 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
1314 }
1315
1316 return (rc);
1317 }
1318
1319 static int
1320 sbdp_get_lowest_addr_in_node(pnode_t node, uint64_t *pa)
1321 {
1322 uint64_t mc_decode, seg_pa, tmp_pa;
1323 mc_regs_t mc_regs, *mc_regsp = &mc_regs;
1324 int i, valid;
1325 int rc;
1326
1327
1328 seg_pa = PA_ABOVE_MAX;
1329
1330 if (mc_read_regs(node, mc_regsp)) {
1331 SBDP_DBG_MEM("sbdp_get_lowest_addr_in_node: failed to "
1332 "read source Decode Regs\n");
1333 return (-1);
1334 }
1335
1336 rc = -1;
1337 for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
1338 mc_decode = mc_regsp->mc_decode[i];
1339 valid = mc_decode >> MC_VALID_SHIFT;
1340 tmp_pa = MC_BASE(mc_decode) << PHYS2UM_SHIFT;
1341 if (valid)
1342 rc = 0;
1343 if (valid && (tmp_pa < seg_pa))
1344 seg_pa = tmp_pa;
1345 }
1346
1347 if (rc == 0)
1348 *pa = seg_pa;
1349
1350 return (rc);
1351 }
1352
1353 int
1354 sbdp_is_mem(pnode_t node, void *arg)
1355 {
1356 mem_op_t *memp = (mem_op_t *)arg;
1357 char type[OBP_MAXPROPNAME];
1358 int bd;
1359 pnode_t *list;
1360 int board;
1361 char name[OBP_MAXDRVNAME];
1362 int len;
1363
1364 ASSERT(memp);
1365
1366 list = memp->nodes;
1367 board = memp->board;
1368
1369 /*
1370 * Make sure that this node doesn't have its status
1371 * as failed
1372 */
1373 if (sbdp_get_comp_status(node) != SBD_COND_OK) {
1374 return (DDI_FAILURE);
1375 }
1376
1377 len = prom_getproplen(node, "device_type");
1378 if ((len > 0) && (len < OBP_MAXPROPNAME))
1379 (void) prom_getprop(node, "device_type", (caddr_t)type);
1380 else
1381 type[0] = '\0';
1382
1383 if (strcmp(type, "memory-controller") == 0) {
1384 int wnode;
1385
1386 if (sbdp_get_bd_and_wnode_num(node, &bd, &wnode) < 0)
1387 return (DDI_FAILURE);
1388
1389 if (bd == board) {
1390 /*
1391 * Make sure we don't overwrite the array
1392 */
1393 if (memp->nmem >= SBDP_MAX_MEM_NODES_PER_BOARD)
1394 return (DDI_FAILURE);
1395 (void) prom_getprop(node, OBP_NAME, (caddr_t)name);
1396 SBDP_DBG_MEM("name %s boot bd %d board %d\n", name,
1397 board, bd);
1398 list[memp->nmem++] = node;
1399 return (DDI_SUCCESS);
1400 }
1401 }
1402
1403 return (DDI_FAILURE);
1404 }
1405
1406 static int
1407 sbdp_get_meminfo(pnode_t nodeid, int mc, uint64_t *size, uint64_t *base_pa)
1408 {
1409 int board, wnode;
1410 int valid;
1411 mc_regs_t mc_regs, *mc_regsp = &mc_regs;
1412 uint64_t mc_decode = 0;
1413
1414 if (sbdp_get_bd_and_wnode_num(nodeid, &board, &wnode) < 0)
1415 return (-1);
1416
1417 if (mc_read_regs(nodeid, mc_regsp)) {
1418 SBDP_DBG_MEM("sbdp_get_meminfo: failed to read source "
1419 "Decode Regs");
1420 return (-1);
1421 }
1422 /*
1423 * Calculate memory size
1424 */
1425 mc_decode = mc_regsp->mc_decode[mc];
1426
1427 /*
1428 * Check the valid bit to see if bank is there
1429 */
1430 valid = mc_decode >> MC_VALID_SHIFT;
1431 if (valid) {
1432 *size = MC_UK2SPAN(mc_decode);
1433 *base_pa = MC_BASE(mc_decode) << PHYS2UM_SHIFT;
1434 }
1435
1436 return (0);
1437 }
1438
1439
1440 /*
1441 * Luckily for us mem nodes and cpu/CMP nodes are siblings. All we need to
1442 * do is search in the same branch as the mem node for its sibling cpu or
1443 * CMP node.
1444 */
1445 pnode_t
1446 mc_get_sibling_cpu(pnode_t nodeid)
1447 {
1448 int portid;
1449
1450 if (prom_getprop(nodeid, OBP_PORTID, (caddr_t)&portid) < 0)
1451 return (OBP_NONODE);
1452
1453 /*
1454 * cpus and memory are siblings so we don't need to traverse
1455 * the whole tree, just a branch
1456 */
1457 return (sbdp_find_nearby_cpu_by_portid(nodeid, portid));
1458 }
1459
1460 /*
1461 * Given a memory node, check its sibling cpu or CMP to see if
1462 * access to mem will be ok. We need to search for the node and
1463 * if found get its condition.
1464 */
1465 sbd_cond_t
1466 mc_check_sibling_cpu(pnode_t nodeid)
1467 {
1468 pnode_t cpu_node;
1469 sbd_cond_t cond;
1470 int i;
1471
1472 cpu_node = mc_get_sibling_cpu(nodeid);
1473
1474 cond = sbdp_get_comp_status(cpu_node);
1475
1476 if (cond == SBD_COND_OK) {
1477 int wnode;
1478 int bd;
1479 int unit;
1480 int portid;
1481
1482 if (sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) < 0)
1483 return (SBD_COND_UNKNOWN);
1484
1485 (void) prom_getprop(nodeid, OBP_PORTID, (caddr_t)&portid);
1486
1487 /*
1488 * Access to the memory controller should not
1489 * be attempted if any of the cores are marked
1490 * as being in reset.
1491 */
1492 for (i = 0; i < SBDP_MAX_CORES_PER_CMP; i++) {
1493 unit = SG_PORTID_TO_CPU_UNIT(portid, i);
1494 if (sbdp_is_cpu_present(wnode, bd, unit) &&
1495 sbdp_is_cpu_in_reset(wnode, bd, unit)) {
1496 cond = SBD_COND_UNUSABLE;
1497 break;
1498 }
1499 }
1500 }
1501
1502 return (cond);
1503 }
1504
1505 int
1506 mc_read_regs(pnode_t nodeid, mc_regs_t *mc_regsp)
1507 {
1508 int len;
1509 uint64_t mc_addr, mask;
1510 mc_regspace reg;
1511 sbd_cond_t sibling_cpu_cond;
1512 int local_mc;
1513 int portid;
1514 int i;
1515
1516 if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
1517 (portid == -1))
1518 return (-1);
1519
1520 /*
1521 * The mc should not be accessed if its corresponding cpu
1522 * has failed.
1523 */
1524 sibling_cpu_cond = mc_check_sibling_cpu(nodeid);
1525
1526 if ((sibling_cpu_cond == SBD_COND_FAILED) ||
1527 (sibling_cpu_cond == SBD_COND_UNUSABLE)) {
1528 return (-1);
1529 }
1530
1531 len = prom_getproplen(nodeid, "reg");
1532 if (len != sizeof (mc_regspace))
1533 return (-1);
1534
1535 if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
1536 return (-1);
1537
1538 mc_addr = ((uint64_t)reg.regspec_addr_hi) << 32;
1539 mc_addr |= (uint64_t)reg.regspec_addr_lo;
1540
1541 /*
1542 * Make sure we don't switch cpus
1543 */
1544 affinity_set(CPU_CURRENT);
1545 if (portid == cpunodes[CPU->cpu_id].portid)
1546 local_mc = 1;
1547 else
1548 local_mc = 0;
1549
1550 for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
1551 mask = SG_REG_2_OFFSET(i);
1552
1553 /*
1554 * If the memory controller is local to this CPU, we use
1555 * the special ASI to read the decode registers.
1556 * Otherwise, we load the values from a magic address in
1557 * I/O space.
1558 */
1559 if (local_mc) {
1560 mc_regsp->mc_decode[i] = lddmcdecode(
1561 mask & MC_OFFSET_MASK);
1562 } else {
1563 mc_regsp->mc_decode[i] = lddphysio(
1564 (mc_addr | mask));
1565 }
1566 }
1567 affinity_clear();
1568
1569 return (0);
1570 }
1571
1572 uint64_t
1573 mc_get_addr(pnode_t nodeid, int mc, uint_t *asi)
1574 {
1575 int len;
1576 uint64_t mc_addr, addr;
1577 mc_regspace reg;
1578 int portid;
1579 int local_mc;
1580
1581 if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
1582 (portid == -1))
1583 return (-1);
1584
1585 len = prom_getproplen(nodeid, "reg");
1586 if (len != sizeof (mc_regspace))
1587 return (-1);
1588
1589 if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
1590 return (-1);
1591
1592 mc_addr = ((uint64_t)reg.regspec_addr_hi) << 32;
1593 mc_addr |= (uint64_t)reg.regspec_addr_lo;
1594
1595 /*
1596 * Make sure we don't switch cpus
1597 */
1598 affinity_set(CPU_CURRENT);
1599 if (portid == cpunodes[CPU->cpu_id].portid)
1600 local_mc = 1;
1601 else
1602 local_mc = 0;
1603
1604 if (local_mc) {
1605 *asi = ASI_MC_DECODE;
1606 addr = SG_REG_2_OFFSET(mc) & MC_OFFSET_MASK;
1607 } else {
1608 *asi = ASI_IO;
1609 addr = SG_REG_2_OFFSET(mc) | mc_addr;
1610 }
1611 affinity_clear();
1612
1613 return (addr);
1614 }
1615
1616 /* ARGSUSED */
1617 int
1618 sbdp_mem_add_span(sbdp_handle_t *hp, uint64_t address, uint64_t size)
1619 {
1620 return (0);
1621 }
1622
1623 int
1624 sbdp_mem_del_span(sbdp_handle_t *hp, uint64_t address, uint64_t size)
1625 {
1626 pfn_t basepfn = (pfn_t)(address >> PAGESHIFT);
1627 pgcnt_t npages = (pgcnt_t)(size >> PAGESHIFT);
1628
1629 if (size > 0) {
1630 int rv;
1631 rv = kcage_range_delete_post_mem_del(basepfn, npages);
1632 if (rv != 0) {
1633 cmn_err(CE_WARN,
1634 "unexpected kcage_range_delete_post_mem_del"
1635 " return value %d", rv);
1636 sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
1637 return (-1);
1638 }
1639 }
1640 return (0);
1641 }
1642
1643 /*
1644 * This routine gets the size including the
1645 * bad banks
1646 */
1647 int
1648 sbdp_get_mem_size(sbdp_handle_t *hp)
1649 {
1650 uint64_t size = 0;
1651 struct memlist *mlist, *ml;
1652
1653 mlist = sbdp_get_memlist(hp, (dev_info_t *)NULL);
1654
1655 for (ml = mlist; ml; ml = ml->ml_next)
1656 size += ml->ml_size;
1657
1658 (void) sbdp_del_memlist(hp, mlist);
1659
1660 SBDP_DBG_MEM("sbdp_get_mem_size: size 0x%" PRIx64 "\n", size);
1661
1662 return (btop(size));
1663 }
1664
1665 /*
1666 * This function compares the list of banks passed with the banks
1667 * in the segment
1668 */
1669 int
1670 sbdp_check_seg_with_banks(sbdp_seg_t *seg, sbdp_bank_t *banks)
1671 {
1672 sbdp_bank_t *cur_bank, *bank;
1673 int i = 0;
1674
1675 for (cur_bank = seg->banks; cur_bank; cur_bank = cur_bank->seg_next) {
1676 for (bank = banks; bank; bank = bank->bd_next) {
1677 if (!bank->valid)
1678 continue;
1679
1680 if (cur_bank == bank) {
1681 i++;
1682 }
1683 }
1684 }
1685
1686 SBDP_DBG_MEM("banks found = %d total banks = %d\n", i, seg->nbanks);
1687 /*
1688 * If every bank in the segment was matched by a valid bank in the
1689 * passed list, then this segment is not interleaved across boards
1690 */
1691 if (i == seg->nbanks)
1692 return (0);
1693
1694 return (1);
1695 }
1696
1697
1698 /*
1699 * This routine determines if any of the memory banks on the board
1700 * participate in across board memory interleaving
1701 */
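/*
 * Outline of the checks below: fetch the board's bank list, find the
 * segment that contains the first valid bank, and verify that every
 * bank in that segment belongs to this board; any mismatch means the
 * memory is interleaved across boards.
 */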
1702 int
1703 sbdp_isinterleaved(sbdp_handle_t *hp, dev_info_t *dip)
1704 {
1705 _NOTE(ARGUNUSED(dip))
1706
1707 sbdp_bank_t *bankp;
1708 int wnode, board;
1709 int is_interleave = 0;
1710 sbdp_bd_t *bdp;
1711 uint64_t base;
1712 sbdp_seg_t *seg;
1713
1714 board = hp->h_board;
1715 wnode = hp->h_wnode;
1716
1717 #ifdef DEBUG
1718 sbdp_print_all_segs();
1719 #endif
1720 /*
1721 * Get the banks for this board
1722 */
1723 bdp = sbdp_get_bd_info(wnode, board);
1724
1725 if (bdp == NULL)
1726 return (-1);
1727
1728 /*
1729 * Search for the first bank with valid memory
1730 */
1731 for (bankp = bdp->banks; bankp; bankp = bankp->bd_next)
1732 if (bankp->valid)
1733 break;
1734
1735 /*
1736 * If there are no valid banks on the board, then the board is
1737 * not interleaved across boards
1738 */
1739 if (bankp == NULL) {
1740 return (0);
1741 }
1742
1743 base = bankp->um & ~(bankp->uk);
1744
1745 /*
1746 * Find the segment for the first bank
1747 */
1748 if ((seg = sbdp_get_seg(base)) == NULL) {
1749 /*
1750 * Something bad has happened.
1751 */
1752 return (-1);
1753 }
1754 /*
1755 * Make sure that this segment is only composed of the banks
1756 * in this board. If one is missing or we have an extra one
1757 * the board is interleaved across boards
1758 */
1759 is_interleave = sbdp_check_seg_with_banks(seg, bdp->banks);
1760
1761 SBDP_DBG_MEM("interleave is %d\n", is_interleave);
1762
1763 return (is_interleave);
1764 }
1765
1766
1767 /*
1768 * Each node has 4 logical banks. This routine adds all the banks (including
1769 * the invalid ones) to the passed list. Note that we use the bd list and not
1770 * the seg list
1771 */
1772 int
1773 sbdp_add_nodes_banks(pnode_t node, sbdp_bank_t **banks)
1774 {
1775 int i;
1776 mc_regs_t regs;
1777 uint64_t *mc_decode;
1778 sbdp_bank_t *bank;
1779
1780 if (mc_read_regs(node, &regs) == -1)
1781 return (-1);
1782
1783 mc_decode = regs.mc_decode;
1784
1785 for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
1786 /*
1787 * This creates the mem for the new member of the list
1788 */
1789 sbdp_fill_bank_info(mc_decode[i], &bank);
1790
1791 SBDP_DBG_MEM("adding bank %d\n", bank->id);
1792
1793 /*
1794 * Insert bank into the beginning of the list
1795 */
1796 bank->bd_next = *banks;
1797 *banks = bank;
1798
1799 /*
1800 * Add this bank into its corresponding
1801 * segment
1802 */
1803 sbdp_add_bank_to_seg(bank);
1804 }
1805 return (0);
1806 }
1807
1808 /*
1809 * given the info, create a new bank node and set the info
1810 * as appropriate. We allocate the memory for the bank. It is
1811 * up to the caller to ensure the mem is freed
1812 */
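/*
 * The decode fields captured below are consumed elsewhere (see
 * sbdp_add_bank_to_seg()): the segment base is um & ~uk, the segment
 * size is (uk + 1) << PHYS2UM_SHIFT, and the interleave factor is
 * (lk ^ 0xF) + 1.  For example, with a hypothetical lk of 0xE the
 * bank belongs to a two-way interleaved segment.
 */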
1813 void
1814 sbdp_fill_bank_info(uint64_t mc_decode, sbdp_bank_t **bank)
1815 {
1816 static int id = 0;
1817 sbdp_bank_t *new;
1818
1819 new = kmem_zalloc(sizeof (sbdp_bank_t), KM_SLEEP);
1820
1821 new->id = id++;
1822 new->valid = (mc_decode >> MC_VALID_SHIFT);
1823 new->uk = MC_UK(mc_decode);
1824 new->um = MC_UM(mc_decode);
1825 new->lk = MC_LK(mc_decode);
1826 new->lm = MC_LM(mc_decode);
1827 new->bd_next = NULL;
1828 new->seg_next = NULL;
1829
1830 *bank = new;
1831 }
1832
1833 /*
1834 * Each bd has the potential of having mem banks on it. The banks
1835 * may be empty or not. This routine gets all the mem banks
1836 * for this bd
1837 */
1838 void
1839 sbdp_init_bd_banks(sbdp_bd_t *bdp)
1840 {
1841 int i, nmem;
1842 pnode_t *lists;
1843
1844 lists = bdp->nodes;
1845 nmem = bdp->nnum;
1846
1847 if (bdp->banks != NULL) {
1848 return;
1849 }
1850
1851 bdp->banks = NULL;
1852
1853 for (i = 0; i < nmem; i++) {
1854 (void) sbdp_add_nodes_banks(lists[i], &bdp->banks);
1855 }
1856 }
1857
1858 /*
1859 * swap the list of banks for the 2 boards
1860 */
1861 void
1862 sbdp_swap_list_of_banks(sbdp_bd_t *bdp1, sbdp_bd_t *bdp2)
1863 {
1864 sbdp_bank_t *tmp_ptr;
1865
1866 if ((bdp1 == NULL) || (bdp2 == NULL))
1867 return;
1868
1869 tmp_ptr = bdp1->banks;
1870 bdp1->banks = bdp2->banks;
1871 bdp2->banks = tmp_ptr;
1872 }
1873
1874 /*
1875 * free all the banks on the board. Note that a bank node belongs
1876 * to 2 lists. The first list is the board list. The second one is
1877 * the seg list. We remove the bank from both lists but free the
1878 * node only once.
1879 */
1880 void
1881 sbdp_fini_bd_banks(sbdp_bd_t *bdp)
1882 {
1883 sbdp_bank_t *bkp, *nbkp;
1884
1885 for (bkp = bdp->banks; bkp; ) {
1886 /*
1887 * Remove the bank from the seg list first
1888 */
1889 SBDP_DBG_MEM("Removing bank %d\n", bkp->id);
1890 sbdp_remove_bank_from_seg(bkp);
1891 nbkp = bkp->bd_next;
1892 bkp->bd_next = NULL;
1893 kmem_free(bkp, sizeof (sbdp_bank_t));
1894
1895 bkp = nbkp;
1896 }
1897 bdp->banks = NULL;
1898 }
1899
1900 #ifdef DEBUG
1901 void
1902 sbdp_print_bd_banks(sbdp_bd_t *bdp)
1903 {
1904 sbdp_bank_t *bp;
1905 int i;
1906
1907 SBDP_DBG_MEM("BOARD %d\n", bdp->bd);
1908
1909 for (bp = bdp->banks, i = 0; bp; bp = bp->bd_next, i++) {
1910 SBDP_DBG_MEM("BANK [%d]:\n", bp->id);
1911 SBDP_DBG_MEM("\tvalid %d\tuk 0x%x\tum 0x%x\tlk 0x%x"
1912 "\tlm 0x%x\n", bp->valid, bp->uk, bp->um,
1913 bp->lk, bp->lm);
1914 }
1915 }
1916
1917 void
1918 sbdp_print_all_segs(void)
1919 {
1920 sbdp_seg_t *cur_seg;
1921
1922 for (cur_seg = sys_seg; cur_seg; cur_seg = cur_seg->next)
1923 sbdp_print_seg(cur_seg);
1924 }
1925
1926 void
1927 sbdp_print_seg(sbdp_seg_t *seg)
1928 {
1929 sbdp_bank_t *bp;
1930 int i;
1931
1932 SBDP_DBG_MEM("SEG %d\n", seg->id);
1933
1934 for (bp = seg->banks, i = 0; bp; bp = bp->seg_next, i++) {
1935 SBDP_DBG_MEM("BANK [%d]:\n", bp->id);
1936 SBDP_DBG_MEM("\tvalid %d\tuk 0x%x\tum 0x%x\tlk 0x%x"
1937 "\tlm 0x%x\n", bp->valid, bp->uk, bp->um,
1938 bp->lk, bp->lm);
1939 }
1940 }
1941 #endif
1942
1943 void
1944 sbdp_add_bank_to_seg(sbdp_bank_t *bank)
1945 {
1946 uint64_t base;
1947 sbdp_seg_t *cur_seg;
1948 static int id = 0;
1949
1950 /*
1951 * if we got an invalid bank just skip it
1952 */
1953 if (bank == NULL || !bank->valid)
1954 return;
1955 base = bank->um & ~(bank->uk);
1956
1957 if ((cur_seg = sbdp_get_seg(base)) == NULL) {
1958 /*
1959 * This bank is part of a new segment, so create
1960 * a struct for it and add it to the list of segments
1961 */
1962 cur_seg = kmem_zalloc(sizeof (sbdp_seg_t), KM_SLEEP);
1963 cur_seg->id = id++;
1964 cur_seg->base = base;
1965 cur_seg->size = ((bank->uk +1) << PHYS2UM_SHIFT);
1966 cur_seg->intlv = ((bank->lk ^ 0xF) + 1);
1967 /*
1968 * add to the seg list
1969 */
1970 cur_seg->next = sys_seg;
1971 sys_seg = cur_seg;
1972 }
1973
1974 cur_seg->nbanks++;
1975 /*
1976 * add bank into segs bank list. Note we add at the head
1977 */
1978 bank->seg_next = cur_seg->banks;
1979 cur_seg->banks = bank;
1980 }
1981
1982 /*
1983 * Remove this segment from the seg list
1984 */
1985 void
1986 sbdp_rm_seg(sbdp_seg_t *seg)
1987 {
1988 sbdp_seg_t **curpp, *curp;
1989
1990 curpp = &sys_seg;
1991
1992 while ((curp = *curpp) != NULL) {
1993 if (curp == seg) {
1994 *curpp = curp->next;
1995 break;
1996 }
1997 curpp = &curp->next;
1998 }
1999
2000 if (curp != NULL) {
2001 kmem_free(curp, sizeof (sbdp_seg_t));
2002 curp = NULL;
2003 }
2004 }
2005
2006 /*
2007 * remove this bank from its seg list
2008 */
2009 void
2010 sbdp_remove_bank_from_seg(sbdp_bank_t *bank)
2011 {
2012 uint64_t base;
2013 sbdp_seg_t *cur_seg;
2014 sbdp_bank_t **curpp, *curp;
2015
2016 /*
2017 * if we got an invalid bank just skip it
2018 */
2019 if (bank == NULL || !bank->valid)
2020 return;
2021 base = bank->um & ~(bank->uk);
2022
2023 /*
2024 * If the bank doesn't belong to any seg just return
2025 */
2026 if ((cur_seg = sbdp_get_seg(base)) == NULL) {
2027 SBDP_DBG_MEM("bank %d with no segment\n", bank->id);
2028 return;
2029 }
2030
2031 /*
2032 * Find bank in the seg
2033 */
2034 curpp = &cur_seg->banks;
2035
2036 while ((curp = *curpp) != NULL) {
2037 if (curp->id == bank->id) {
2038 /*
2039 * found node, remove it
2040 */
2041 *curpp = curp->seg_next;
2042 break;
2043 }
2044 curpp = &curp->seg_next;
2045 }
2046
2047 if (curp != NULL) {
2048 cur_seg->nbanks--;
2049 }
2050
2051 if (cur_seg->nbanks == 0) {
2052 /*
2053 * No banks left on this segment, remove the segment
2054 */
2055 SBDP_DBG_MEM("No banks left in this segment, removing it\n");
2056 sbdp_rm_seg(cur_seg);
2057 }
2058 }
2059
2060 sbdp_seg_t *
2061 sbdp_get_seg(uint64_t base)
2062 {
2063 sbdp_seg_t *cur_seg;
2064
2065 for (cur_seg = sys_seg; cur_seg; cur_seg = cur_seg->next) {
2066 if (cur_seg->base == base)
2067 break;
2068 }
2069
2070 return (cur_seg);
2071 }
2072
2073 #ifdef DEBUG
2074 int
2075 sbdp_passthru_readmem(sbdp_handle_t *hp, void *arg)
2076 {
2077 _NOTE(ARGUNUSED(hp))
2078 _NOTE(ARGUNUSED(arg))
2079
2080 struct memlist *ml;
2081 uint64_t src_pa;
2082 uint64_t dst_pa;
2083 uint64_t dst;
2084
2085
2086 dst_pa = va_to_pa(&dst);
2087
2088 memlist_read_lock();
2089 for (ml = phys_install; ml; ml = ml->ml_next) {
2090 uint64_t nbytes;
2091
2092 src_pa = ml->ml_address;
2093 nbytes = ml->ml_size;
2094
2095 while (nbytes != 0ull) {
2096
2097 /* copy 32 bytes at src_pa to dst_pa */
2098 bcopy32_il(src_pa, dst_pa);
2099
2100 /* increment by 32 bytes */
2101 src_pa += (4 * sizeof (uint64_t));
2102
2103 /* decrement by 32 bytes */
2104 nbytes -= (4 * sizeof (uint64_t));
2105 }
2106 }
2107 memlist_read_unlock();
2108
2109 return (0);
2110 }
2111
2112 static int
2113 isdigit(int ch)
2114 {
2115 return (ch >= '0' && ch <= '9');
2116 }
2117
2118 #define isspace(c) ((c) == ' ' || (c) == '\t' || (c) == '\n')
2119
2120 int
2121 sbdp_strtoi(char *p, char **pos)
2122 {
2123 int n;
2124 int c, neg = 0;
2125
2126 if (!isdigit(c = *p)) {
2127 while (isspace(c))
2128 c = *++p;
2129 switch (c) {
2130 case '-':
2131 neg++;
2132 /* FALLTHROUGH */
2133 case '+':
2134 c = *++p;
2135 }
2136 if (!isdigit(c)) {
2137 if (pos != NULL)
2138 *pos = p;
2139 return (0);
2140 }
2141 }
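/*
 * The loop below accumulates the value as a negative number to avoid
 * overflow at the most negative value.  For example, parsing "57"
 * (a hypothetical input): n = '0' - '5' = -5, then
 * n = -50 + ('0' - '7') = -57, and since neg == 0 the function
 * returns -n == 57.
 */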
2142 for (n = '0' - c; isdigit(c = *++p); ) {
2143 n *= 10; /* two steps to avoid unnecessary overflow */
2144 n += '0' - c; /* accum neg to avoid surprises at MAX */
2145 }
2146 if (pos != NULL)
2147 *pos = p;
2148 return (neg ? n : -n);
2149 }
2150
2151 int
2152 sbdp_passthru_prep_script(sbdp_handle_t *hp, void *arg)
2153 {
2154 int board, i;
2155 sbdp_bd_t *t_bdp, *s_bdp;
2156 char *opts;
2157 int t_board;
2158 sbdp_rename_script_t *rsbuffer;
2159 sbdp_cr_handle_t *cph;
2160 int scriptlen, size;
2161
2162 opts = (char *)arg;
2163 board = hp->h_board;
2164
2165 opts += strlen("prep-script=");
2166 t_board = sbdp_strtoi(opts, NULL);
2167
2168 cph = kmem_zalloc(sizeof (sbdp_cr_handle_t), KM_SLEEP);
2169
2170 size = sizeof (sbdp_rename_script_t) * SBDP_RENAME_MAXOP;
2171 rsbuffer = kmem_zalloc(size, KM_SLEEP);
2172
2173 s_bdp = sbdp_get_bd_info(hp->h_wnode, board);
2174 t_bdp = sbdp_get_bd_info(hp->h_wnode, t_board);
2175
2176 cph->s_bdp = s_bdp;
2177 cph->t_bdp = t_bdp;
2178 cph->script = rsbuffer;
2179
2180 affinity_set(CPU_CURRENT);
2181 scriptlen = sbdp_prep_rename_script(cph);
2182
2183 if (scriptlen <= 0) {
2184 cmn_err(CE_WARN,
2185 "sbdp failed to prep for copy-rename");
2186 }
2187 prom_printf("SCRIPT from board %d to board %d ->\n", board, t_board);
2188 for (i = 0; i < (scriptlen / (sizeof (sbdp_rename_script_t))); i++) {
2189 prom_printf("0x%lx = 0x%lx, asi 0x%x\n",
2190 rsbuffer[i].masr_addr, rsbuffer[i].masr, rsbuffer[i].asi);
2191 }
2192 prom_printf("\n");
2193
2194 affinity_clear();
2195 kmem_free(rsbuffer, size);
2196 kmem_free(cph, sizeof (sbdp_cr_handle_t));
2197
2198 return (0);
2199 }
2200 #endif
2201