/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * Pentium Pro+ memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the AMD64 Architecture
 * Programmer's Manual, volume 2.
 */

static char *mem_owner_bios = "BIOS";

#define MR686_FIXMTRR	(1<<0)

#define mrwithin(mr, a)							\
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb)						\
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define mrvalid(base, len)						\
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	((len) >= (1 << 12)) &&		/* length is >= 4k */		\
	powerof2((len)) &&		/* ... and power of two */	\
	!((base) & ((len) - 1)))	/* base is aligned to the length */
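/*
 * Illustrative example: mrvalid(0xd0000000, 0x08000000) holds because the
 * base is 4K-aligned, the length is a power of two >= 4K, and the base is
 * aligned on the length; mrvalid(0xd4000000, 0x08000000) fails the last
 * test since 0xd4000000 & 0x07ffffff != 0.
 */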

#define mrcopyflags(curr, new)						\
	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0,
    "Disable MTRRs.");

static void	x86_mrinit(struct mem_range_softc *sc);
static int	x86_mrset(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static void	x86_mrAPinit(struct mem_range_softc *sc);
static void	x86_mrreinit(struct mem_range_softc *sc);

static struct mem_range_ops x86_mrops = {
	x86_mrinit,
	x86_mrset,
	x86_mrAPinit,
	x86_mrreinit
};
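
/*
 * These operations back the machine-independent mem_range_attr_get()/
 * mem_range_attr_set() interface, normally reached from userland through
 * the MEMRANGE_GET/MEMRANGE_SET ioctls on /dev/mem (see memcontrol(8)).
 */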

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd);
static void	x86_mrfetch(struct mem_range_softc *sc);
static int	x86_mtrrtype(int flags);
static int	x86_mrt2mtrr(int flags, int oldval);
static int	x86_mtrrconflict(int flag1, int flag2);
static void	x86_mrstore(struct mem_range_softc *sc);
static void	x86_mrstoreone(void *arg);
static struct mem_range_desc *x86_mtrrfixsearch(struct mem_range_softc *sc,
		    u_int64_t addr);
static int	x86_mrsetlow(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static int	x86_mrsetvariable(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);

/* ia32 MTRR type to memory range type conversion */
static int x86_mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};
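
/*
 * Indices in the table above follow the architectural MTRR type encoding:
 * 0 = uncacheable, 1 = write-combining, 2 and 3 = reserved, 4 =
 * write-through, 5 = write-protected, 6 = write-back.  The reserved
 * encodings map to MDF_UNKNOWN.
 */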

#define MTRRTOMRTLEN nitems(x86_mtrrtomrt)

static int
x86_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (x86_mtrrtomrt[val]);
}
/*
 * x86 MTRR conflicts.  Writeback and uncacheable may overlap.
 */
static int
x86_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
		return (1);
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Ensure that the direct map region does not contain any mappings
 * that span MTRRs of different types.  However, the fixed MTRRs can
 * be ignored, because a large page mapping the first 1 MB of physical
 * memory is a special case that the processor handles.  Invalidate
 * any old TLB entries that might hold inconsistent memory type
 * information.
 */
static void
x86_mr_split_dmap(struct mem_range_softc *sc __unused)
{
#ifdef __amd64__
	struct mem_range_desc *mrd;
	int i;

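	/*
	 * Skip the fixed-range descriptors, if any: they only describe the
	 * first 1 MB, which the direct map already treats as a special
	 * case, so only the variable-range descriptors matter here.
	 */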
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, true);
	}
#endif
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to
 * all be in sync in the SMP case).  Note that if we are here, we
 * assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
x86_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
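		/*
		 * Each fixed-range MSR packs eight 8-bit type fields; the
		 * least significant byte describes the lowest-addressed
		 * sub-range, hence the shift right by 8 per descriptor.
		 */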
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < MTRR_N16K / 8; i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < MTRR_N4K / 8; i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
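	/*
	 * Each variable-range MTRR is an MSR pair: PhysBase (base address
	 * and type) followed by PhysMask (mask and valid bit), hence the
	 * stride of two MSRs per descriptor below.
	 */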
	for (; mrd - sc->mr_desc < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    x86_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & mtrr_physmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask. Ick. */
		mrd->mr_len = (~(msrv & mtrr_physmask) &
		    (mtrr_physmask | 0xfff)) + 1;
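		/*
		 * Purely illustrative example: with a 36-bit physical
		 * address space, mtrr_physmask is 0xffffff000; a PhysMask of
		 * 0xff8000000 then gives (~0xff8000000 & 0xfffffffff) + 1 ==
		 * 0x8000000, i.e. a 128 MB range.
		 */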
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
x86_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (x86_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == x86_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

static int
x86_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = x86_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * Must be called with interrupts enabled.
 */
static void
x86_mrstore(struct mem_range_softc *sc)
{

	smp_rendezvous(NULL, x86_mrstoreone, NULL, sc);
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
static void
x86_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_long cr0, cr4;

	mrd = sc->mr_desc;

	critical_enter();

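	/*
	 * The steps below follow the MTRR update sequence recommended in
	 * the processor vendor documentation: disable caching and flush,
	 * disable the MTRRs, rewrite them, flush again, re-enable the
	 * MTRRs, and finally restore CR0/CR4.
	 */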
	/* Disable PGE. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	cr0 = rcr0();
	load_cr0((cr0 & ~CR0_NW) | CR0_CD);

	/* Flushes caches and TLBs. */
	wbinvd();
	invltlb();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/* Set fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < MTRR_N64K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < MTRR_N16K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < MTRR_N4K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; mrd - sc->mr_desc < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= x86_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
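		/*
		 * rounddown2(mtrr_physmask, mr_len) is mtrr_physmask &
		 * ~(mr_len - 1): a one in every implemented physical
		 * address bit at or above log2(mr_len), which is how the
		 * PhysMask register encodes the range length.
		 */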
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = MTRR_PHYSMASK_VALID |
			    rounddown2(mtrr_physmask, mrd->mr_len);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Enable MTRRs. */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);

	critical_exit();
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
x86_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc; i < MTRR_N64K + MTRR_N16K + MTRR_N4K;
	    i++, mrd++)
		if (addr >= mrd->mr_base &&
		    addr < mrd->mr_base + mrd->mr_len)
			return (mrd);
	return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
x86_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* Range check. */
	if ((first_md = x86_mtrrfixsearch(sc, mrd->mr_base)) == NULL ||
	    (last_md = x86_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1))
	    == NULL)
		return (EINVAL);

	/* Check that we aren't doing something risky. */
	if ((mrd->mr_flags & MDF_FORCE) == 0) {
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}
	}

	/* Set flags, clear set-by-firmware flag. */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
x86_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 *
	 * Keep track of the first empty variable descriptor in case
	 * we can't perform a takeover.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* Exact match? */
			if (curr_md->mr_base == mrd->mr_base &&
			    curr_md->mr_len == mrd->mr_len) {
				/* Whoops, owned by someone. */
				if (curr_md->mr_flags & MDF_BUSY)
					return (EBUSY);

				/* Check that we aren't doing something risky */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    (curr_md->mr_flags & MDF_ATTRMASK) ==
				    MDF_UNKNOWN)
					return (EACCES);

				/* Ok, just hijack this entry. */
				free_md = curr_md;
				break;
			}

			/* Non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* Between conflicting region types? */
				if (x86_mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return (EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}

	/* Got somewhere to put it? */
	if (free_md == NULL)
		return (ENOSPC);

	/* Set up new descriptor. */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
x86_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/*
		 * Make sure that what's being asked for is even
		 * possible at all.
		 */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    x86_mtrrtype(mrd->mr_flags) == -1)
			return (EINVAL);

#define FIXTOP	\
    ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
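/* FIXTOP evaluates to 0x100000: the fixed-range MTRRs cover exactly the
 * first megabyte of physical memory. */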

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) != 0 &&
		    mrd->mr_base + mrd->mr_len <= FIXTOP) {
			if ((error = x86_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = x86_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

	x86_mr_split_dmap(sc);

	/* Update the hardware. */
	x86_mrstore(sc);

	/* Refetch to see where we're at. */
	x86_mrfetch(sc);
	return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
x86_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	int i, nmdesc;

	if (sc->mr_desc != NULL)
		/* Already initialized. */
		return;

	nmdesc = 0;
	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but they are not enabled\n");
		return;
	}
	nmdesc = mtrrcap & MTRR_CAP_VCNT;
	if (bootverbose)
		printf("Pentium Pro MTRR support enabled\n");

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.
	 */
	mtrr_physmask = (((uint64_t)1 << cpu_maxphyaddr) - 1) &
	    ~(uint64_t)0xfff;
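	/* For example, cpu_maxphyaddr == 36 yields 0x0000000ffffff000. */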

	/* If fixed MTRRs supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
	if (sc->mr_cap & MR686_FIXMTRR) {
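		/*
		 * Eight 64 KB ranges cover 0x00000-0x7ffff, sixteen 16 KB
		 * ranges cover 0x80000-0xbffff, and sixty-four 4 KB ranges
		 * cover 0xc0000-0xfffff, one descriptor per range.
		 */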
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to
	 * have been set by the firmware. (XXX has something already
	 * played here?)
	 */
	x86_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}

	x86_mr_split_dmap(sc);
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
x86_mrAPinit(struct mem_range_softc *sc)
{

	x86_mrstoreone(sc);
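	/*
	 * Reload the default-type register with the value saved on the
	 * BSP so this AP ends up with the same default memory type and
	 * enable bits.
	 */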
	wrmsr(MSR_MTRRdefType, mtrrdef);
}

/*
 * Re-initialise running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * Must be called with interrupts enabled.
 */
static void
x86_mrreinit(struct mem_range_softc *sc)
{

	smp_rendezvous(NULL, (void (*)(void *))x86_mrAPinit, NULL, sc);
}

static void
x86_mem_drvinit(void *unused)
{

	if (mtrrs_disabled)
		return;
	if (!(cpu_feature & CPUID_MTRR))
		return;
	mem_range_softc.mr_op = &x86_mrops;
	x86_mrinit(&mem_range_softc);
}
SYSINIT(x86memdev, SI_SUB_CPU, SI_ORDER_ANY, x86_mem_drvinit, NULL);