1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #pragma ident "%Z%%M% %I% %E% SMI"
28
29 /*
30 * Starfire Post Descriptor Array (post2obp) management.
31 */
32
33 #include <sys/debug.h>
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/cpuvar.h>
37 #include <sys/dditypes.h>
38 #include <sys/conf.h>
39 #include <sys/ddi.h>
40 #include <sys/kmem.h>
41 #include <sys/sunddi.h>
42 #include <sys/sunndi.h>
43 #include <sys/vm.h>
44 #include <vm/seg.h>
45 #include <vm/seg_kmem.h>
46 #include <vm/seg_kp.h>
47 #include <sys/machsystm.h>
48 #include <sys/starfire.h>
49
50 #include <sys/cpu_sgnblk_defs.h>
51 #include <sys/pda.h>
52 #include <sys/cpu_sgn.h>
53
54 extern struct cpu *SIGBCPU;
55 extern cpu_sgnblk_t *cpu_sgnblkp[];
56
57 extern uint64_t mc_get_mem_alignment();
58 extern uint64_t mc_asr_to_pa(uint_t mcreg);
59
60 static post2obp_info_t *cpu_p2o_mapin(int cpuid);
61 static void cpu_p2o_mapout(int cpuid, post2obp_info_t *p2o);
62 static void p2o_update_checksum(post2obp_info_t *p2o);
63 static uint_t p2o_calc_checksum(post2obp_info_t *p2o);
64 static void p2o_mem_sort(post2obp_info_t *p2o);
65 static void p2o_mem_coalesce(post2obp_info_t *p2o);
66
/*
 * Handle state returned (as an opaque pda_handle_t) by pda_open().
 * Pairs the mapped-in post2obp image with the cpuid whose bbsram
 * it was mapped from, so pda_close() can undo the mapping.
 */
typedef struct {
	post2obp_info_t	*p2o_ptr;	/* mapped-in post2obp structure */
	int		p2o_cpuid;	/* cpu whose sigblock was mapped */
} p2o_info_t;
71
72 /*
73 * PDA management routines. Should ultimately be made
74 * accessible to other Starfire subsystems, but for
75 * now we'll leave it here.
76 */
77 pda_handle_t
pda_open()78 pda_open()
79 {
80 p2o_info_t *pip;
81
82 if (SIGBCPU == NULL) {
83 cmn_err(CE_WARN, "pda_open: SIGBCPU is NULL");
84 return (NULL);
85 }
86
87 pip = (p2o_info_t *)kmem_alloc(sizeof (p2o_info_t), KM_SLEEP);
88
89 pip->p2o_cpuid = (int)SIGBCPU->cpu_id;
90 pip->p2o_ptr = cpu_p2o_mapin(pip->p2o_cpuid);
91
92 if (pip->p2o_ptr == NULL) {
93 kmem_free((caddr_t)pip, sizeof (p2o_info_t));
94 return ((pda_handle_t)NULL);
95 } else {
96 return ((pda_handle_t)pip);
97 }
98 }
99
100 void
pda_close(pda_handle_t ph)101 pda_close(pda_handle_t ph)
102 {
103 p2o_info_t *pip;
104
105 if ((pip = (p2o_info_t *)ph) == NULL)
106 return;
107
108 cpu_p2o_mapout(pip->p2o_cpuid, pip->p2o_ptr);
109
110 kmem_free((caddr_t)pip, sizeof (p2o_info_t));
111 }
112
113 int
pda_board_present(pda_handle_t ph,int boardnum)114 pda_board_present(pda_handle_t ph, int boardnum)
115 {
116 ushort_t bda_board;
117 post2obp_info_t *p2o = ((p2o_info_t *)ph)->p2o_ptr;
118
119 bda_board = p2o->p2o_bdinfo[boardnum].bda_board;
120
121 if ((bda_board & BDAN_MASK) != BDAN_GOOD)
122 return (0);
123 else
124 return (1);
125 }
126
127 void *
pda_get_board_info(pda_handle_t ph,int boardnum)128 pda_get_board_info(pda_handle_t ph, int boardnum)
129 {
130 post2obp_info_t *p2o = ((p2o_info_t *)ph)->p2o_ptr;
131
132 return ((void *)&(p2o->p2o_bdinfo[boardnum]));
133 }
134
/*
 * Return the number of pages of memory attributed to the given
 * board, computed by clipping the PDA memory chunk list against
 * the MC-aligned physical address window derived from the board's
 * memory controller ADR.  boardnum == -1 returns the system-wide
 * Memt_NumPages total instead.
 *
 * NOTE(review): the per-board count below is accumulated in system
 * pfn units (chunk values are shifted by PAGESHIFT-BDA_PAGESHIFT),
 * while Memt_NumPages elsewhere is maintained in BDA_PAGESHIFT
 * units — the two agree only if the shifts coincide; confirm.
 */
uint_t
pda_get_mem_size(pda_handle_t ph, int boardnum)
{
	int		c;
	pgcnt_t		npages;
	uint_t		asr;
	pfn_t		basepfn, endpfn;
	uint64_t	basepa, endpa;
	post2obp_info_t	*p2o = ((p2o_info_t *)ph)->p2o_ptr;

	if (boardnum == -1)
		return (p2o->p2o_memtotal.Memt_NumPages);

	/* the board's MC ADR determines its physical address range */
	asr = p2o->p2o_bdminfo[boardnum].bmda_adr;

	basepa = mc_asr_to_pa(asr);
	/*
	 * Put on MC alignment.
	 */
	endpa = mc_get_mem_alignment();
	basepa &= ~(endpa - 1);
	endpa += basepa;
	basepfn = (pfn_t)(basepa >> PAGESHIFT);
	endpfn = (pfn_t)(endpa >> PAGESHIFT);

	npages = 0;

	for (c = 0; c < p2o->p2o_memtotal.Memt_NumChunks; c++) {
		pfn_t	c_basepfn, c_endpfn;

		/* convert chunk fields from BDA units to system pfns */
		c_basepfn = (pfn_t)p2o->p2o_mchunks[c].Memc_StartAddress
		    >> (PAGESHIFT - BDA_PAGESHIFT);
		c_endpfn = (pfn_t)p2o->p2o_mchunks[c].Memc_Size
		    >> (PAGESHIFT - BDA_PAGESHIFT);
		c_endpfn += c_basepfn;

		/* skip chunks entirely outside the board's window */
		if ((endpfn <= c_basepfn) || (basepfn >= c_endpfn))
			continue;

		/* clip the chunk to the window and count the overlap */
		c_basepfn = MAX(c_basepfn, basepfn);
		c_endpfn = MIN(c_endpfn, endpfn);
		ASSERT(c_basepfn <= c_endpfn);

		npages += c_endpfn - c_basepfn;
	}

	return (npages);
}
183
/*
 * Add the physical span [basepa, basepa + nbytes) to the PDA
 * memory chunk list.  Chunk addresses and sizes are kept in
 * BDA_PAGESHIFT page units.  The list is then re-sorted,
 * coalesced, and the PDA checksum recomputed.
 */
void
pda_mem_add_span(pda_handle_t ph, uint64_t basepa, uint64_t nbytes)
{
	post2obp_info_t	*p2o = ((p2o_info_t *)ph)->p2o_ptr;
	int		c, nchunks;
	pfn_t		a_pfn, a_npgs;

	ASSERT(p2o);

	nchunks = p2o->p2o_memtotal.Memt_NumChunks;
	a_pfn = (pfn_t)(basepa >> BDA_PAGESHIFT);
	a_npgs = (pfn_t)(nbytes >> BDA_PAGESHIFT);

	/*
	 * Find the slot that keeps the list sorted by start address
	 * and shift the tail up one entry to make room.  If no
	 * existing chunk starts at or above a_pfn the loop falls
	 * through with c == nchunks and the new chunk is appended.
	 */
	for (c = 0; c < nchunks; c++) {
		int cend;

		if (a_pfn <= p2o->p2o_mchunks[c].Memc_StartAddress) {
			for (cend = nchunks; cend > c; cend--)
				p2o->p2o_mchunks[cend] =
				    p2o->p2o_mchunks[cend - 1];
			break;
		}
	}
	p2o->p2o_mchunks[c].Memc_StartAddress = a_pfn;
	p2o->p2o_mchunks[c].Memc_Size = a_npgs;
	nchunks++;

	p2o->p2o_memtotal.Memt_NumChunks = nchunks;
	p2o->p2o_memtotal.Memt_NumPages += a_npgs;

	p2o_mem_sort(p2o);
	p2o_mem_coalesce(p2o);
	p2o_update_checksum(p2o);
}
218
219 void
pda_mem_del_span(pda_handle_t ph,uint64_t basepa,uint64_t nbytes)220 pda_mem_del_span(pda_handle_t ph, uint64_t basepa, uint64_t nbytes)
221 {
222 post2obp_info_t *p2o = ((p2o_info_t *)ph)->p2o_ptr;
223 int c, o_nchunks, n_nchunks;
224 pfn_t d_pfn;
225 pgcnt_t d_npgs, npages;
226 MemChunk_t *mp, *endp;
227
228 ASSERT(p2o);
229
230 d_pfn = (pfn_t)(basepa >> BDA_PAGESHIFT);
231 d_npgs = (pgcnt_t)(nbytes >> BDA_PAGESHIFT);
232 n_nchunks = o_nchunks = p2o->p2o_memtotal.Memt_NumChunks;
233 endp = &(p2o->p2o_mchunks[o_nchunks]);
234 npages = 0;
235
236 for (c = 0; c < o_nchunks; c++) {
237 uint_t p_pfn, p_npgs;
238
239 p_pfn = p2o->p2o_mchunks[c].Memc_StartAddress;
240 p_npgs = p2o->p2o_mchunks[c].Memc_Size;
241 if (p_npgs == 0)
242 continue;
243
244 if (((d_pfn + d_npgs) <= p_pfn) ||
245 (d_pfn >= (p_pfn + p_npgs))) {
246 npages += p_npgs;
247 continue;
248 }
249
250 if (d_pfn < p_pfn) {
251 if ((d_pfn + d_npgs) >= (p_pfn + p_npgs)) {
252 /*
253 * Entire chunk goes away.
254 */
255 p_pfn = p_npgs = 0;
256 } else {
257 p_npgs -= d_pfn + d_npgs - p_pfn;
258 p_pfn = d_pfn + d_npgs;
259 }
260 } else if (d_pfn == p_pfn) {
261 if ((d_pfn + d_npgs) >= (p_pfn + p_npgs)) {
262 p_pfn = p_npgs = 0;
263 } else {
264 p_npgs -= d_npgs;
265 p_pfn += d_npgs;
266 }
267 } else {
268 if ((d_pfn + d_npgs) >= (p_pfn + p_npgs)) {
269 p_npgs = d_pfn - p_pfn;
270 npages += p_npgs;
271 } else {
272 /*
273 * Ugh, got to split a
274 * memchunk, we're going to
275 * need an extra one. It's
276 * gotten from the end.
277 */
278 endp->Memc_StartAddress = d_pfn + d_npgs;
279 endp->Memc_Size = (p_pfn + p_npgs)
280 - (d_pfn + d_npgs);
281 npages += endp->Memc_Size;
282 endp++;
283 n_nchunks++;
284 p_npgs = d_pfn - p_pfn;
285 }
286 }
287
288 p2o->p2o_mchunks[c].Memc_StartAddress = p_pfn;
289 p2o->p2o_mchunks[c].Memc_Size = p_npgs;
290 if (p_npgs == 0)
291 n_nchunks--;
292 npages += p_npgs;
293 }
294 p2o->p2o_memtotal.Memt_NumChunks = n_nchunks;
295 p2o->p2o_memtotal.Memt_NumPages = npages;
296
297 /*
298 * There is a possibility we created holes in the memchunk list
299 * due to memchunks that went away. Before we can sort and
300 * coalesce we need to "pull up" the end of the memchunk list
301 * and get rid of any holes.
302 * endp = points to the last empty memchunk entry.
303 */
304 for (mp = &(p2o->p2o_mchunks[0]); mp < endp; mp++) {
305 register MemChunk_t *mmp;
306
307 if (mp->Memc_Size)
308 continue;
309
310 for (mmp = mp; mmp < endp; mmp++)
311 *mmp = *(mmp + 1);
312 mp--;
313 endp--;
314 }
315 ASSERT(endp == &(p2o->p2o_mchunks[n_nchunks]));
316
317 p2o_mem_sort(p2o);
318 p2o_mem_coalesce(p2o);
319 p2o_update_checksum(p2o);
320 }
321
/*
 * Synchronize all memory attributes (currently just MC ADRs [aka ASR])
 * with PDA representative values for the given board.  A board value
 * of (-1) indicates all boards.
 */
327 /*ARGSUSED*/
328 void
pda_mem_sync(pda_handle_t ph,int board,int unit)329 pda_mem_sync(pda_handle_t ph, int board, int unit)
330 {
331 post2obp_info_t *p2o = ((p2o_info_t *)ph)->p2o_ptr;
332 register int b;
333
334 for (b = 0; b < MAX_SYSBDS; b++) {
335 if ((board != -1) && (board != b))
336 continue;
337
338 if (pda_board_present(ph, b)) {
339 uint64_t masr;
340 uint_t masr_value;
341
342 masr = STARFIRE_MC_ASR_ADDR_BOARD(b);
343 masr_value = ldphysio(masr);
344
345 p2o->p2o_bdminfo[b].bmda_adr = masr_value;
346 }
347
348 if (board == b)
349 break;
350 }
351
352 p2o_update_checksum(p2o);
353 }
354
355 void
pda_get_busmask(pda_handle_t ph,short * amask,short * dmask)356 pda_get_busmask(pda_handle_t ph, short *amask, short *dmask)
357 {
358 post2obp_info_t *p2o = ((p2o_info_t *)ph)->p2o_ptr;
359
360 if (amask)
361 *amask = p2o ? p2o->p2o_abus_mask : 0;
362
363 if (dmask)
364 *dmask = p2o ? p2o->p2o_dbus_mask : 0;
365 }
366
367 int
pda_is_valid(pda_handle_t ph)368 pda_is_valid(pda_handle_t ph)
369 {
370 post2obp_info_t *p2o = ((p2o_info_t *)ph)->p2o_ptr;
371 uint_t csum;
372
373 if (p2o == NULL)
374 return (0);
375
376 csum = p2o_calc_checksum(p2o);
377
378 return (csum == p2o->p2o_csum);
379 }
380
381 /*
382 * Post2obp support functions below here. Internal to PDA module.
383 *
384 * p2o_update_checksum
385 *
386 * Calculate checksum for post2obp structure and insert it so
387 * when POST reads it he'll be happy.
388 */
389 static void
p2o_update_checksum(post2obp_info_t * p2o)390 p2o_update_checksum(post2obp_info_t *p2o)
391 {
392 uint_t new_csum;
393
394 ASSERT(p2o);
395
396 new_csum = p2o_calc_checksum(p2o);
397 p2o->p2o_csum = new_csum;
398 }
399
/*
 * Compute the checksum of the post2obp structure: the fixed part
 * plus the nchunks-long variable memchunk tail (sizeof already
 * accounts for VAR_ARRAY_LEN entries, hence the adjustment),
 * summed as an array of uint_t words.  The stored checksum field
 * is zeroed during the summation and restored afterwards, so this
 * routine serves both validation (pda_is_valid) and regeneration
 * (p2o_update_checksum).  The negated sum is returned: once stored
 * in p2o_csum, the word-sum of the whole structure is zero.
 */
static uint_t
p2o_calc_checksum(post2obp_info_t *p2o)
{
	int	i, nchunks;
	uint_t	*csumptr;
	uint_t	p2o_size;
	uint_t	csum, o_csum;

	ASSERT(p2o != NULL);

	nchunks = p2o->p2o_memtotal.Memt_NumChunks;
	p2o_size = sizeof (post2obp_info_t)
	    + ((nchunks - VAR_ARRAY_LEN) * sizeof (MemChunk_t));
	p2o_size /= sizeof (uint_t);	/* word count, not byte count */

	o_csum = p2o->p2o_csum;
	p2o->p2o_csum = 0;	/* field must not contribute to the sum */
	csum = 0;
	for (i = 0, csumptr = (uint_t *)p2o; i < p2o_size; i++)
		csum += *csumptr++;
	p2o->p2o_csum = o_csum;

	return (-csum);
}
424
425 /*
426 * Sort the mchunk list in ascending order based on the
427 * Memc_StartAddress field.
428 *
429 * disclosure: This is based on the qsort() library routine.
430 */
static void
p2o_mem_sort(post2obp_info_t *p2o)
{
	MemChunk_t	*base;
	int		nchunks;
	uint_t		c1, c2;
	char		*min, *max;
	register char	c, *i, *j, *lo, *hi;

	ASSERT(p2o != NULL);

	nchunks = p2o->p2o_memtotal.Memt_NumChunks;
	base = &p2o->p2o_mchunks[0];

	/* ala qsort() */
	max = (char *)base + nchunks * sizeof (MemChunk_t);
	hi = max;
	/*
	 * Pass 1: locate the entry with the smallest start address
	 * (tracked by j) ...
	 */
	for (j = lo = (char *)base; (lo += sizeof (MemChunk_t)) < hi; ) {
		c1 = ((MemChunk_t *)j)->Memc_StartAddress;
		c2 = ((MemChunk_t *)lo)->Memc_StartAddress;
		if (c1 > c2)
			j = lo;
	}
	/*
	 * ... and byte-swap it into slot 0, where it acts as a
	 * sentinel for the backward scans in pass 2.
	 */
	if (j != (char *)base) {
		for (i = (char *)base,
		    hi = (char *)base + sizeof (MemChunk_t);
		    /* CSTYLED */
		    i < hi;) {
			c = *j;
			*j++ = *i;
			*i++ = c;
		}
	}
	/*
	 * Pass 2: insertion sort.  For each entry (min), scan back
	 * to find its insertion point (hi), then rotate the entry
	 * into place with byte-granular copies.
	 */
	for (min = (char *)base;
	    /* CSTYLED */
	    (hi = min += sizeof (MemChunk_t)) < max;) {
		do {
			hi -= sizeof (MemChunk_t);
			c1 = ((MemChunk_t *)hi)->Memc_StartAddress;
			c2 = ((MemChunk_t *)min)->Memc_StartAddress;
		} while (c1 > c2);
		if ((hi += sizeof (MemChunk_t)) != min) {
			for (lo = min + sizeof (MemChunk_t);
			    /* CSTYLED */
			    --lo >= min;) {
				c = *lo;
				for (i = j = lo;
				    (j -= sizeof (MemChunk_t)) >= hi;
				    i = j) {
					*i = *j;
				}
				*i = c;
			}
		}
	}
}
487
/*
 * Merge adjacent or overlapping entries of the (previously sorted)
 * memchunk list, then recompute the chunk count and total page
 * count from the surviving entries.
 */
static void
p2o_mem_coalesce(post2obp_info_t *p2o)
{
	MemChunk_t	*mc;
	int		nchunks, new_nchunks;
	uint_t		addr, size, naddr, nsize;
	uint_t		npages;
	register int	i, cp, ncp;

	ASSERT(p2o != NULL);

	nchunks = new_nchunks = p2o->p2o_memtotal.Memt_NumChunks;
	mc = &p2o->p2o_mchunks[0];

	for (cp = i = 0; i < (nchunks-1); i++, cp = ncp) {
		ncp = cp + 1;
		addr = mc[cp].Memc_StartAddress;
		size = mc[cp].Memc_Size;
		naddr = mc[ncp].Memc_StartAddress;
		nsize = mc[ncp].Memc_Size;

		/* does the current chunk touch or overlap the next? */
		if ((addr + size) >= naddr) {
			uint_t	overlap;

			overlap = addr + size - naddr;
			/*
			 * if (nsize < overlap) then
			 * next entry fits within the current
			 * entry so no need to update size.
			 */
			if (nsize >= overlap) {
				size += nsize - overlap;
				mc[cp].Memc_Size = size;
			}
			/* absorb the next entry: pull the tail down */
			bcopy((char *)&mc[ncp+1],
			    (char *)&mc[ncp],
			    (nchunks - ncp - 1) * sizeof (MemChunk_t));
			/* re-examine the merged slot against its new next */
			ncp = cp;
			new_nchunks--;
		}
	}

	npages = 0;
	for (i = 0; i < new_nchunks; i++)
		npages += p2o->p2o_mchunks[i].Memc_Size;

	p2o->p2o_memtotal.Memt_NumChunks = new_nchunks;
	p2o->p2o_memtotal.Memt_NumPages = npages;
}
537
/*
 * Map in the cpu's post2obp structure.
 */
/*
 * Map the given cpu's post2obp structure (located in its bbsram,
 * addressed through PSI space) into kernel virtual address space.
 * Returns a pointer to the structure, or NULL if the cpu's
 * signature block carries no post-config offset.  Undone by
 * cpu_p2o_mapout().
 */
static post2obp_info_t *
cpu_p2o_mapin(int cpuid)
{
	uint64_t	cpu_p2o_physaddr;
	uint32_t	cpu_p2o_offset;
	caddr_t		cvaddr;
	uint_t		num_pages;
	pfn_t		pfn;

	ASSERT(cpu_sgnblkp[cpuid] != NULL);
	/*
	 * Construct the physical base address of the bbsram
	 * in PSI space associated with this cpu in question.
	 */
	cpu_p2o_offset = (uint32_t)cpu_sgnblkp[cpuid]->sigb_postconfig;
	if (cpu_p2o_offset == 0) {
		cmn_err(CE_WARN,
		    "cpu_p2o_mapin:%d: sigb_postconfig == NULL\n",
		    cpuid);
		return (NULL);
	}
	cpu_p2o_physaddr = (STARFIRE_UPAID2UPS(cpuid) | STARFIRE_PSI_BASE) +
	    (uint64_t)cpu_p2o_offset;
	/* split into page-aligned base and intra-page offset */
	cpu_p2o_offset = (uint32_t)(cpu_p2o_physaddr & MMU_PAGEOFFSET);
	cpu_p2o_physaddr -= (uint64_t)cpu_p2o_offset;

	/*
	 * cpu_p2o_physaddr = Beginning of page containing p2o.
	 * cpu_p2o_offset = Offset within page where p2o starts.
	 */

	pfn = (pfn_t)(cpu_p2o_physaddr >> MMU_PAGESHIFT);

	/* whole pages needed to cover offset + structure */
	num_pages = mmu_btopr(cpu_p2o_offset + sizeof (post2obp_info_t));

	/*
	 * Map in the post2obp structure.
	 */
	cvaddr = vmem_alloc(heap_arena, ptob(num_pages), VM_SLEEP);

	hat_devload(kas.a_hat, cvaddr, ptob(num_pages),
	    pfn, PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);

	return ((post2obp_info_t *)(cvaddr + (ulong_t)cpu_p2o_offset));
}
586
/*
 * Undo cpu_p2o_mapin(): recompute the intra-page offset the mapin
 * used, back p2o up to its page boundary, then unload the locked
 * translations and free the kernel virtual range.
 */
static void
cpu_p2o_mapout(int cpuid, post2obp_info_t *p2o)
{
	ulong_t		cvaddr, num_pages;
	uint32_t	cpu_p2o_offset;

	ASSERT(cpu_sgnblkp[cpuid] != NULL);

	cpu_p2o_offset = (uint32_t)cpu_sgnblkp[cpuid]->sigb_postconfig;
	if (cpu_p2o_offset == 0) {
		cmn_err(CE_WARN,
		    "cpu_p2o_mapout:%d: sigb_postconfig == NULL\n",
		    cpuid);
		return;
	}

	/* intra-page offset of the p2o within its first page */
	cpu_p2o_offset = (uint32_t)(((STARFIRE_UPAID2UPS(cpuid) |
	    STARFIRE_PSI_BASE) +
	    (uint64_t)cpu_p2o_offset) &
	    MMU_PAGEOFFSET);

	num_pages = mmu_btopr(cpu_p2o_offset + sizeof (post2obp_info_t));

	cvaddr = (ulong_t)p2o - cpu_p2o_offset;
	if (cvaddr & MMU_PAGEOFFSET) {
		/* should be impossible if p2o came from cpu_p2o_mapin() */
		cmn_err(CE_WARN,
		    "cpu_p2o_mapout:%d: cvaddr (0x%x) not on page "
		    "boundary\n",
		    cpuid, (uint_t)cvaddr);
		return;
	}

	hat_unload(kas.a_hat, (caddr_t)cvaddr, ptob(num_pages),
	    HAT_UNLOAD_UNLOCK);
	vmem_free(heap_arena, (caddr_t)cvaddr, ptob(num_pages));
}
623