/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For now we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code selects one of
 * the 2 areas in turn each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000

/*
 * This macro waits for a CP15 write to take effect; it is needed whenever
 * we must ensure that the last operation to the coprocessor has completed
 * before continuing.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro	clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm
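
/*
 * Usage: callers invoke "clean_d_cache rX, rY" with two scratch registers
 * (see __flush_whole_cache and cpu_xscale_switch_mm below); both registers
 * are clobbered.  The macro ping-pongs between the two CACHESIZE halves of
 * the area at CLEAN_ADDR, tracked in clean_addr below, so two consecutive
 * invocations never allocate the same lines twice in a row.
 */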
	.data
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment.
 */
ENTRY(cpu_xscale_proc_init)
	@ enable write buffer coalescing; some bootloaders disable it
	mrc	p15, 0, r1, c1, c0, 1
	bic	r1, r1, #1
	mcr	p15, 0, r1, c1, c0, 1
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: the MMU is turned off from this point.  We count on the
	@ pipeline already containing the last two instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(xscale_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(xscale_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
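
/*
 * Note: xscale_flush_user_cache_range below falls back to
 * __flush_whole_cache for ranges of MAX_AREA_SIZE (32KB, i.e. the full
 * CACHESIZE) or more, on the assumption that cleaning the whole cache by
 * line allocation is then cheaper than iterating over every line of the
 * range.
 */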
/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start	- start address (may not be aligned)
 * - end	- end address (exclusive, may not be aligned)
 * - vm_flags	- vm_area_struct flags describing address space
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(xscale_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
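
/*
 * The DMA helpers below implement the usual cache maintenance for
 * non-coherent DMA:
 *
 *	clean		write dirty lines back to memory, keep them valid
 *	invalidate	discard lines so stale data is re-fetched from memory
 *	flush		clean, then invalidate
 *
 * dma_map_area() dispatches to one of them based on the DMA direction:
 * DMA_TO_DEVICE cleans, DMA_FROM_DEVICE invalidates, and
 * DMA_BIDIRECTIONAL flushes.
 */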
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
xscale_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
xscale_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(xscale_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	bcs	xscale_dma_inv_range
	b	xscale_dma_flush_range
ENDPROC(xscale_dma_map_area)

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(xscale_80200_A0_A1_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	b	xscale_dma_flush_range
ENDPROC(xscale_80200_A0_A1_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(xscale_dma_unmap_area)
	mov	pc, lr
ENDPROC(xscale_dma_unmap_area)

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions xscale

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 * http://www.intel.com/design/iio/specupdt/273415.htm
 */
.macro a0_alias basename
	.globl xscale_80200_A0_A1_\basename
	.type xscale_80200_A0_A1_\basename , %function
	.equ xscale_80200_A0_A1_\basename , xscale_\basename
.endm
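
@ Example expansion: "a0_alias dma_flush_range" below makes
@ xscale_80200_A0_A1_dma_flush_range an alias (via .equ) for
@ xscale_dma_flush_range, so define_cache_functions can build a complete
@ cpu_cache_fns table for the A0/A1 parts without duplicating any code.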
/*
 * Most of the cache functions are unchanged for these processor revisions.
 * Export suitable alias symbols for the unchanged functions:
 */
	a0_alias flush_icache_all
	a0_alias flush_user_cache_all
	a0_alias flush_kern_cache_all
	a0_alias flush_user_cache_range
	a0_alias coherent_kern_range
	a0_alias coherent_user_range
	a0_alias flush_kern_dcache_area
	a0_alias dma_flush_range
	a0_alias dma_unmap_area

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions xscale_80200_A0_A1

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
cpu_xscale_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_BUFFERABLE					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE			@ L_PTE_MT_MINICACHE
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	0x00						@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	xscale_set_pte_ext_prologue

	@
	@ Erratum 40: must set memory to write-through for user read-only pages
	@ (the "& ~(4 << 2)" masks out one memory-type bit so that
	@ L_PTE_MT_WRITEALLOC is treated like L_PTE_MT_WRITEBACK here)
	@
	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY

	moveq	r1, #L_PTE_MT_WRITETHROUGH
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xscale_mt_table
	ldr	ip, [ip, r1]
	bic	r2, r2, #0x0c
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	mov	pc, lr

	.ltorg
	.align

.globl	cpu_xscale_suspend_size
.equ	cpu_xscale_suspend_size, 4 * 6
#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xscale_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c1, 0	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmfd	sp!, {r4 - r9, pc}
ENDPROC(cpu_xscale_do_suspend)
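
/*
 * Resume mirrors the register layout stored by cpu_xscale_do_suspend
 * above; the TLBs and caches are invalidated before any saved state is
 * reloaded.  r1 is presumed to carry the translation table base handed
 * over by the generic cpu_resume code (see the c2 write below).
 */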
ENTRY(cpu_xscale_do_resume)
	ldmia	r0, {r4 - r9}			@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I & D caches, BTB
	mcr	p14, 0, r4, c6, c0, 0		@ clock configuration, turbo mode
	mcr	p15, 0, r5, c15, c1, 0		@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0		@ PID
	mcr	p15, 0, r7, c3, c0, 0		@ domain ID
	mcr	p15, 0, r1, c2, c0, 0		@ translation table base addr
	mcr	p15, 0, r8, c1, c1, 0		@ auxiliary control reg
	mov	r0, r9				@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_xscale_do_resume)
#endif

	__CPUINIT

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr
	.size	__xscale_setup, . - __xscale_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 .... .101
	 */
	.type	xscale_crval, #object
xscale_crval:
	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"

	string	cpu_80200_A0_A1_name, "XScale-80200 A0/A1"
	string	cpu_80200_name, "XScale-80200"
	string	cpu_80219_name, "XScale-80219"
	string	cpu_8032x_name, "XScale-IOP8032x Family"
	string	cpu_8033x_name, "XScale-IOP8033x Family"
	string	cpu_pxa250_name, "XScale-PXA250"
	string	cpu_pxa210_name, "XScale-PXA210"
	string	cpu_ixp42x_name, "XScale-IXP42x Family"
	string	cpu_ixp43x_name, "XScale-IXP43x Family"
	string	cpu_ixp46x_name, "XScale-IXP46x Family"
	string	cpu_ixp2400_name, "XScale-IXP2400"
	string	cpu_ixp2800_name, "XScale-IXP2800"
	string	cpu_pxa255_name, "XScale-PXA255"
	string	cpu_pxa270_name, "XScale-PXA270"

	.align

	.section ".proc.info.init", #alloc, #execinstr
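
/*
 * Each xscale_proc_info invocation below emits one struct proc_info_list
 * record (see <asm/procinfo.h>): the kernel's CPU detection matches the
 * main ID register against it as (CPUID & cpu_mask) == cpu_val, and the
 * first matching entry in this section wins.
 */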
.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	\cpu_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.ifb	\cache
	.long	xscale_cache_fns
	.else
	.long	\cache
	.endif
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_A0_A1_name, \
		cache=xscale_80200_A0_A1_cache_fns
	xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name
	xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name
	xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name
	xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name
	xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name
	xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name
	xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name
	xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name
	xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name
	xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name
	xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name
	xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name
	xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name
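
@ Note: ordering matters above.  The 80200 A0/A1 entry must precede the
@ generic 80200 entry: its tighter 0xfffffffe mask matches only revisions
@ 0 and 1, which the generic 0xfffffff0 mask would otherwise also claim.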