/*
 * Based on linux/arch/arm/mm/nommu.c
 *
 * ARM PMSAv7 supporting functions.
 */

#include <linux/bitops.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/sections.h>

#include "mm.h"

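/*
 * A covering MPU region: "base" and "size" describe a power-of-two sized,
 * size-aligned window; "subreg" is a bitmap of the (up to eight) subregions
 * to disable, used to trim slack off either end of the window.
 */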
struct region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long subreg;
};

static struct region __initdata mem[MPU_MAX_REGIONS];
#ifdef CONFIG_XIP_KERNEL
static struct region __initdata xip[MPU_MAX_REGIONS];
#endif

static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;

static int __init __mpu_min_region_order(void);
static int __init __mpu_max_regions(void);

#ifndef CONFIG_CPU_V7M

#define DRBAR	__ACCESS_CP15(c6, 0, c1, 0)
#define IRBAR	__ACCESS_CP15(c6, 0, c1, 1)
#define DRSR	__ACCESS_CP15(c6, 0, c1, 2)
#define IRSR	__ACCESS_CP15(c6, 0, c1, 3)
#define DRACR	__ACCESS_CP15(c6, 0, c1, 4)
#define IRACR	__ACCESS_CP15(c6, 0, c1, 5)
#define RNGNR	__ACCESS_CP15(c6, 0, c2, 0)
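
/*
 * These CP15 accessors expand to MRC/MCR p15, <Op1>, <Rt>, <CRn>, <CRm>,
 * <Op2> instructions; e.g. DRBAR is read via "mrc p15, 0, Rt, c6, c1, 0".
 */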

/* Region number */
static inline void rgnr_write(u32 v)
{
	write_sysreg(v, RNGNR);
}

/* Data-side / unified region attributes */

/* Region access control register */
static inline void dracr_write(u32 v)
{
	write_sysreg(v, DRACR);
}

/* Region size register */
static inline void drsr_write(u32 v)
{
	write_sysreg(v, DRSR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
	write_sysreg(v, DRBAR);
}

static inline u32 drbar_read(void)
{
	return read_sysreg(DRBAR);
}

/* Optional instruction-side region attributes */

/* I-side Region access control register */
static inline void iracr_write(u32 v)
{
	write_sysreg(v, IRACR);
}

/* I-side Region size register */
static inline void irsr_write(u32 v)
{
	write_sysreg(v, IRSR);
}

/* I-side Region base address register */
static inline void irbar_write(u32 v)
{
	write_sysreg(v, IRBAR);
}

static inline u32 irbar_read(void)
{
	return read_sysreg(IRBAR);
}

#else

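/*
 * On M-class CPUs (CONFIG_CPU_V7M) the MPU is not accessible via CP15;
 * it is programmed through memory-mapped registers in the System Control
 * Space instead.
 */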
static inline void rgnr_write(u32 v)
{
	writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR);
}

/* Data-side / unified region attributes */

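/*
 * v7-M packs the region attributes (bits 31:16) and the size/enable field
 * (bits 15:0) into the single MPU_RASR register, so each helper below
 * preserves the other half with a read-modify-write.
 */
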
/* Region access control register */
static inline void dracr_write(u32 v)
{
	u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0);

	writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR);
}

/* Region size register */
static inline void drsr_write(u32 v)
{
	u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16);

	writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
	writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR);
}

static inline u32 drbar_read(void)
{
	return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR);
}

/* ARMv7-M only supports a unified MPU, so I-side operations are nops */

static inline void iracr_write(u32 v) {}
static inline void irsr_write(u32 v) {}
static inline void irbar_write(u32 v) {}
static inline u32 irbar_read(void) { return 0; }

#endif

static int __init mpu_present(void)
{
	return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
}

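/*
 * Convert an arbitrary window into a single PMSAv7 region: align the base
 * down and round the size up to a power of two, then disable leading and
 * trailing eighth-sized subregions to mask the slack.  For instance, 96MiB
 * at 0x08000000 becomes a 128MiB region at 0x08000000 with the top two
 * 16MiB subregions disabled.  Fails if the slack is not a whole number of
 * subregions or the subregions would be too small.
 */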
static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
{
	unsigned long subreg, bslots, sslots;
	phys_addr_t abase = base & ~(size - 1);
	phys_addr_t asize = base + size - abase;
	phys_addr_t p2size = 1 << __fls(asize);
	phys_addr_t bdiff, sdiff;

	if (p2size != asize)
		p2size *= 2;

	bdiff = base - abase;
	sdiff = p2size - asize;
	subreg = p2size / MPU_NR_SUBREGS;

	if ((bdiff % subreg) || (sdiff % subreg))
		return false;

	bslots = bdiff / subreg;
	sslots = sdiff / subreg;

	if (bslots || sslots) {
		int i;

		if (subreg < MPU_MIN_SUBREG_SIZE)
			return false;

		if (bslots + sslots > MPU_NR_SUBREGS)
			return false;

		for (i = 0; i < bslots; i++)
			_set_bit(i, &region->subreg);

		for (i = 1; i <= sslots; i++)
			_set_bit(MPU_NR_SUBREGS - i, &region->subreg);
	}

	region->base = abase;
	region->size = p2size;

	return true;
}

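/*
 * Greedily cover [base, base + size) with at most "limit" regions: first
 * try to map the whole remainder with one region (using disabled
 * subregions to absorb the slack); if that fails, fall back to the largest
 * power-of-two chunk allowed by both the current base alignment and the
 * remaining size, then iterate.  For instance, 0x2d00 bytes at 0x8000 end
 * up as 0x2000@0x8000, 0x800@0xa000 and 0x800@0xa800 with its top three
 * 0x100-byte subregions disabled (assuming the architectural 256-byte
 * minimum subregion size).
 */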
static int __init allocate_region(phys_addr_t base, phys_addr_t size,
				  unsigned int limit, struct region *regions)
{
	int count = 0;
	phys_addr_t diff = size;
	int attempts = MPU_MAX_REGIONS;

	while (diff) {
		/* Try to cover the region as is (maybe with help of subregions) */
		if (try_split_region(base, size, &regions[count])) {
			count++;
			base += size;
			diff -= size;
			size = diff;
		} else {
			/*
			 * The maximum aligned region might overflow phys_addr_t
			 * if "base" is 0. Hence we keep everything below 4G
			 * until we take the smaller of the aligned region
			 * size ("asize") and the rounded region size ("p2size"),
			 * one of which is guaranteed to be smaller than the
			 * maximum physical address.
			 */
			phys_addr_t asize = (base - 1) ^ base;
			phys_addr_t p2size = (1 << __fls(diff)) - 1;

			size = asize < p2size ? asize + 1 : p2size + 1;
		}

		if (count > limit)
			break;

		if (!attempts)
			break;

		attempts--;
	}

	return count;
}

/* MPU initialisation functions */
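
/*
 * Trim the memblock view of RAM down to what the MPU can actually map:
 * probe the number of regions and the minimum region order, reserve slots
 * for the background, vectors and (XIP) ROM regions, cover the first
 * memory bank with the remaining slots and give back whatever could not
 * be covered.
 */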
void __init adjust_lowmem_bounds_mpu(void)
{
	phys_addr_t specified_mem_size = 0, total_mem_size = 0;
	struct memblock_region *reg;
	bool first = true;
	phys_addr_t mem_start;
	phys_addr_t mem_end;
	unsigned int mem_max_regions;
	int num, i;

	if (!mpu_present())
		return;

	/* Free up MPU_PROBE_REGION */
	mpu_min_region_order = __mpu_min_region_order();

	/* How many regions are supported */
	mpu_max_regions = __mpu_max_regions();

	mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions);

	/* We need to keep one slot for the background region */
	mem_max_regions--;

#ifndef CONFIG_CPU_V7M
	/* ... and one for the vectors */
	mem_max_regions--;
#endif

#ifdef CONFIG_XIP_KERNEL
	/* plus some regions to cover the XIP ROM */
	num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
			      mem_max_regions, xip);

	mem_max_regions -= num;
#endif

	for_each_memblock(memory, reg) {
		if (first) {
			phys_addr_t phys_offset = PHYS_OFFSET;

			/*
			 * Initially only use memory contiguous from
			 * PHYS_OFFSET.
			 */
			if (reg->base != phys_offset)
				panic("First memory bank must be contiguous from PHYS_OFFSET");

			mem_start = reg->base;
			mem_end = reg->base + reg->size;
			specified_mem_size = reg->size;
			first = false;
		} else {
			/*
			 * memblock auto merges contiguous blocks, so remove
			 * all blocks afterwards in one go (we can't remove
			 * blocks separately while iterating)
			 */
			pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
				  &mem_end, &reg->base);
			memblock_remove(reg->base, 0 - reg->base);
			break;
		}
	}

	num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);

	for (i = 0; i < num; i++) {
		unsigned long subreg = mem[i].size / MPU_NR_SUBREGS;

		total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);

		pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
			 &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg);
	}

	if (total_mem_size != specified_mem_size) {
		pr_warn("Truncating memory from %pa to %pa (MPU region constraints)\n",
			&specified_mem_size, &total_mem_size);
		memblock_remove(mem_start + total_mem_size,
				specified_mem_size - total_mem_size);
	}
}

static int __init __mpu_max_regions(void)
{
	/*
	 * We don't support a different number of I/D side regions so if we
	 * have separate instruction and data memory maps then return
	 * whichever side has a smaller number of supported regions.
	 */
	u32 dregions, iregions, mpuir;

	mpuir = read_cpuid_mputype();

	dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;

	/* Check for separate d-side and i-side memory maps */
	if (mpuir & MPUIR_nU)
		iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;

	/* Use the smaller of the two maxima */
	return min(dregions, iregions);
}

static int __init mpu_iside_independent(void)
{
	/* MPUIR.nU specifies whether there is *not* a unified memory map */
	return read_cpuid_mputype() & MPUIR_nU;
}

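/*
 * Unimplemented low-order RBAR bits read back as zero, so writing
 * 0xFFFFFFFC and reading the result back reveals the base-address
 * granularity the MPU supports; e.g. a read-back of 0xFFFFFFE0 means a
 * 32-byte (order 5) minimum region size.
 */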
static int __init __mpu_min_region_order(void)
{
	u32 drbar_result, irbar_result;

	/* We've kept a region free for this probing */
	rgnr_write(MPU_PROBE_REGION);
	isb();
	/*
	 * As per the ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
	 * region order
	 */
	drbar_write(0xFFFFFFFC);
	drbar_result = irbar_result = drbar_read();
	drbar_write(0x0);
	/* If the MPU is non-unified, we use the larger of the two minima */
	if (mpu_iside_independent()) {
		irbar_write(0xFFFFFFFC);
		irbar_result = irbar_read();
		irbar_write(0x0);
	}
	isb(); /* Ensure that MPU region operations have completed */

	/* Return whichever result is larger */
	return __ffs(max(drbar_result, irbar_result));
}

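/*
 * Program one MPU region on the data side and, when the I-side is
 * independent, mirror it on the instruction side.  The size/enable
 * register is written last so that a partially configured region is
 * never enabled.
 */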
static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
				   unsigned int size_order, unsigned int properties,
				   unsigned int subregions, bool need_flush)
{
	u32 size_data;

	/* We kept a region free for probing the resolution of MPU regions */
	if (number > mpu_max_regions || number >= MPU_MAX_REGIONS)
		return -ENOENT;

	if (size_order > 32)
		return -ENOMEM;

	if (size_order < mpu_min_region_order)
		return -ENOMEM;

	/* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^(N+1) */
	size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
	size_data |= subregions << MPU_RSR_SD;
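	/* e.g. size_order 20 yields SZ = 19, i.e. a 2^20-byte (1MiB) region */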

	if (need_flush)
		flush_cache_all();

	dsb(); /* Ensure all previous data accesses occur with old mappings */
	rgnr_write(number);
	isb();
	drbar_write(start);
	dracr_write(properties);
	isb(); /* Propagate properties before enabling region */
	drsr_write(size_data);

	/* Check for independent I-side registers */
	if (mpu_iside_independent()) {
		irbar_write(start);
		iracr_write(properties);
		isb();
		irsr_write(size_data);
	}
	isb();

	/* Store region info (we treat i/d side the same, so only store d) */
	mpu_rgn_info.rgns[number].dracr = properties;
	mpu_rgn_info.rgns[number].drbar = start;
	mpu_rgn_info.rgns[number].drsr = size_data;

	mpu_rgn_info.used++;

	return 0;
}

/*
 * Set up default MPU regions, doing nothing if there is no MPU
 */
void __init mpu_setup(void)
{
	int i, region = 0, err = 0;

	if (!mpu_present())
		return;

	/* Set up the MPU (order is important) */
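	/*
	 * On overlap the higher-numbered region takes precedence, so the
	 * catch-all background region goes in the lowest slot and the more
	 * permissive ROM/RAM/vectors mappings are layered above it.
	 */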

	/* Background */
	err |= mpu_setup_region(region++, 0, 32,
				MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA,
				0, false);

#ifdef CONFIG_XIP_KERNEL
	/* ROM */
	for (i = 0; i < ARRAY_SIZE(xip); i++) {
		/*
		 * In case we overwrite the RAM region set up earlier in
		 * head-nommu.S (which is cacheable), all subsequent data
		 * accesses until we set up RAM below would go through the
		 * background region (which is uncacheable); thus we need
		 * to clean and invalidate the cache.
		 */
		bool need_flush = region == MPU_RAM_REGION;

		if (!xip[i].size)
			continue;

		err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
					MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL,
					xip[i].subreg, need_flush);
	}
#endif

	/* RAM */
	for (i = 0; i < ARRAY_SIZE(mem); i++) {
		if (!mem[i].size)
			continue;

		err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
					MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL,
					mem[i].subreg, false);
	}

	/* Vectors */
#ifndef CONFIG_CPU_V7M
	err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
				MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL,
				0, false);
#endif
	if (err) {
		panic("MPU region initialization failure! %d", err);
	} else {
		pr_info("Using ARMv7 PMSA Compliant MPU. "
			"Region independence: %s, Used %d of %d regions\n",
			mpu_iside_independent() ? "Yes" : "No",
			mpu_rgn_info.used, mpu_max_regions);
	}
}
485