/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
 *
 *  Heavily based on proc-arm926.S
 *  Maintainer: Assaf Hoffman <hoffman@marvell.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
#define CACHE_DLINESIZE	32

	.bss
	.align 3
__cache_params_loc:
	.space	8

	.text
__cache_params:
	.word	__cache_params_loc
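
/*
 * __cache_params_loc holds two words describing the D-cache geometry,
 * computed once by cpu_feroceon_proc_init() from the Cache Type Register:
 *   word 0: index of the last cache set, pre-shifted into place for
 *           set/way operations (set index lives at bit 5 for 32-byte lines)
 *   word 1: index of the last way, pre-shifted into bits [31:30]
 *           (0 for a direct-mapped cache, 3 << 30 for a 4-way cache)
 * __flush_whole_cache combines the two to drive its set/way
 * clean+invalidate loop without re-reading the cache geometry each time.
 */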

/*
 * cpu_feroceon_proc_init()
 */
SYM_TYPED_FUNC_START(cpu_feroceon_proc_init)
	mrc	p15, 0, r0, c0, c0, 1		@ read cache type register
	ldr	r1, __cache_params
	mov	r2, #(16 << 5)
	tst	r0, #(1 << 16)			@ get way
	mov	r0, r0, lsr #18			@ get cache size order
	movne	r3, #((4 - 1) << 30)		@ 4-way
	and	r0, r0, #0xf
	moveq	r3, #0				@ 1-way
	mov	r2, r2, lsl r0			@ actual cache size
	movne	r2, r2, lsr #2			@ turned into # of sets
	sub	r2, r2, #(1 << 5)
	stmia	r1, {r2, r3}
#ifdef CONFIG_VFP
	mov	r1, #1				@ disable quirky VFP
	str_l	r1, VFP_arch_feroceon, r2
#endif
	ret	lr
SYM_FUNC_END(cpu_feroceon_proc_init)

/*
 * cpu_feroceon_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0		@ clean L2
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_feroceon_proc_fin)

/*
 * cpu_feroceon_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_feroceon_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
SYM_FUNC_END(cpu_feroceon_reset)
	.popsection

/*
 * cpu_feroceon_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_feroceon_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
SYM_FUNC_END(cpu_feroceon_do_idle)

/*
 *	flush_icache_all()
 *
 *	Unconditionally invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(feroceon_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(feroceon_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
	.align	5
SYM_FUNC_ALIAS(feroceon_flush_user_cache_all, feroceon_flush_kern_cache_all)

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(feroceon_flush_kern_cache_all)
	mov	r2, #VM_EXEC

__flush_whole_cache:
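	@ Load the precomputed D-cache geometry (see __cache_params_loc
	@ above) and clean+invalidate every line by set/way: the inner
	@ loop steps through the ways of one set, the outer loop through
	@ the sets.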
	ldr	r1, __cache_params
	ldmia	r1, {r1, r3}
1:	orr	ip, r1, r3
2:	mcr	p15, 0, ip, c7, c14, 2		@ clean + invalidate D set/way
	subs	ip, ip, #(1 << 30)		@ next way
	bcs	2b
	subs	r1, r1, #(1 << 5)		@ next set
	bcs	1b

	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_flush_kern_cache_all)

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
	.align	5
SYM_TYPED_FUNC_START(feroceon_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
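	@ the loop below is unrolled to handle two cache lines per iteration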
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_flush_user_cache_range)

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
	.align	5
SYM_TYPED_FUNC_START(feroceon_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	feroceon_coherent_user_range
#endif
SYM_FUNC_END(feroceon_coherent_kern_range)

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
SYM_TYPED_FUNC_START(feroceon_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
SYM_FUNC_END(feroceon_coherent_user_range)

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
	.align	5
SYM_TYPED_FUNC_START(feroceon_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_flush_kern_dcache_area)

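/*
 * "range" variant, for cores with the Feroceon CP15 c15 range operations:
 * the start address is latched first and writing the (inclusive) top
 * address kicks off a clean+invalidate of the whole D-cache range in one
 * go.  Interrupts are masked around the pair of writes so that no other
 * cache range operation can slip in between them.
 */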
	.align	5
SYM_TYPED_FUNC_START(feroceon_range_flush_kern_dcache_area)
	mrs	r2, cpsr
	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_range_flush_kern_dcache_area)

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_inv_range:
	mrs	r2, cpsr
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c14, 0		@ D inv range start
	mcr	p15, 5, r1, c15, c14, 1		@ D inv range top
	msr	cpsr_c, r2			@ restore interrupts
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_clean_range:
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c13, 0		@ D clean range start
	mcr	p15, 5, r1, c15, c13, 1		@ D clean range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
	.align	5
SYM_TYPED_FUNC_START(feroceon_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_dma_flush_range)

	.align	5
SYM_TYPED_FUNC_START(feroceon_range_dma_flush_range)
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_range_dma_flush_range)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
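/*
 * Direction handling, for reference: with the generic enum
 * dma_data_direction values (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1,
 * DMA_FROM_DEVICE = 2), the compare below selects clean for TO_DEVICE,
 * invalidate for FROM_DEVICE ("bcs" is taken when dir is unsigned higher
 * or same), and clean+invalidate for BIDIRECTIONAL.
 */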
SYM_TYPED_FUNC_START(feroceon_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_dma_clean_range
	bcs	feroceon_dma_inv_range
	b	feroceon_dma_flush_range
SYM_FUNC_END(feroceon_dma_map_area)

/*
 *	dma_map_area(start, size, dir) - range-operation variant
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(feroceon_range_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_range_dma_clean_range
	bcs	feroceon_range_dma_inv_range
	b	feroceon_range_dma_flush_range
SYM_FUNC_END(feroceon_range_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
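/*
 * Nothing to do here: all of the required cache maintenance is performed
 * up front in dma_map_area() (the usual arrangement for these pre-ARMv6
 * cores, which do not speculatively refill cache lines behind the CPU's
 * back while the transfer is in flight).
 */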
SYM_TYPED_FUNC_START(feroceon_dma_unmap_area)
	ret	lr
SYM_FUNC_END(feroceon_dma_unmap_area)

	.align	5
SYM_TYPED_FUNC_START(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r2, r0
	mov	r3, r1
#endif
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1:	mcr	p15, 1, r2, c15, c9, 1		@ clean L2 entry
	add	r2, r2, #CACHE_DLINESIZE
	subs	r3, r3, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(cpu_feroceon_dcache_clean_area)

/* =============================== PageTable ============================== */

/*
 * cpu_feroceon_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
	/*
	 * Note: we wish to call __flush_whole_cache but we need to preserve
	 * lr to do so.  The only way without touching main memory is to
	 * use r2 which is normally used to test the VM_EXEC flag, and
	 * compensate locally for the skipped ops if it is not set.
	 */
	mov	r2, lr				@ abuse r2 to preserve lr
	bl	__flush_whole_cache
	@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
	tst	r2, #VM_EXEC
	mcreq	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcreq	p15, 0, ip, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	r2
#else
	ret	lr
#endif
SYM_FUNC_END(cpu_feroceon_switch_mm)

/*
 * cpu_feroceon_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mcr	p15, 1, r0, c15, c9, 1		@ clean L2 entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr
SYM_FUNC_END(cpu_feroceon_set_pte_ext)

/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl	cpu_feroceon_suspend_size
.equ	cpu_feroceon_suspend_size, 4 * 3
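@ three words are saved/restored: PID (c13), domain access control (c3)
@ and the control register (c1)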
#ifdef CONFIG_ARM_CPU_SUSPEND
SYM_TYPED_FUNC_START(cpu_feroceon_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
SYM_FUNC_END(cpu_feroceon_do_suspend)

SYM_TYPED_FUNC_START(cpu_feroceon_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
SYM_FUNC_END(cpu_feroceon_do_resume)
#endif

	.type	__feroceon_setup, #function
__feroceon_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

	adr	r5, feroceon_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__feroceon_setup, . - __feroceon_setup

	/*
	 *      B
	 *  R   P
	 * .RVI UFRS BLDP WCAM
	 * .011 .001 ..11 0101
	 *
	 */
	.type	feroceon_crval, #object
feroceon_crval:
	crval	clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134
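	/*
	 * __feroceon_setup above loads these two words (bits to clear,
	 * bits to set), applies them to the current control register
	 * value and returns the result in r0; the generic boot code
	 * writes it to CP15 c1 when it enables the MMU.  mmuset is the
	 * set mask for MMU kernels, ucset the one for !CONFIG_MMU (see
	 * the crval macro in proc-macros.S).
	 */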

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_feroceon_name, "Feroceon"
	string	cpu_88fr531_name, "Feroceon 88FR531-vd"
	string	cpu_88fr571_name, "Feroceon 88FR571-vd"
	string	cpu_88fr131_name, "Feroceon 88FR131"

	.align

	.section ".proc.info.init", "a"

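/*
 * One proc_info record is emitted per supported CPU ID below; at boot,
 * __lookup_processor_type matches the running CPU's main ID register
 * against cpu_val/cpu_mask to pick the matching entry.
 */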
.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__feroceon_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	\cpu_name
	.long	feroceon_processor_functions
	.long	v4wbi_tlb_fns
	.long	feroceon_user_fns
	.long	\cache
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
		cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif

	feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
		cache=feroceon_cache_fns
	feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
		cache=feroceon_range_cache_fns
	feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
		cache=feroceon_range_cache_fns
607