/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache instead.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D caches.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it; for now we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code alternates
 * between the two areas each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write.  It is needed whenever we
 * must ensure that the last operation on the coprocessor has completed
 * before continuing with the next one.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro	clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm

	.data
clean_addr:	.word	CLEAN_ADDR

	.text
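/*
 * Usage notes for the macros above (illustrative only; not assembled
 * into any code path by themselves):
 *
 * - cpwait should follow any CP15 write whose effect must be visible
 *   to the next instruction, e.g.:
 *
 *	mcr	p15, 0, r0, c1, c0, 1	@ write auxiliary control reg
 *	cpwait	r1			@ drain the write, flush pipeline
 *
 * - clean_d_cache alternates between two 32 KiB windows: with
 *   CLEAN_ADDR = 0xfffe0000 and CACHESIZE = 32768, the eor toggles the
 *   allocation area between 0xfffe0000 and 0xfffe8000, so the two
 *   windows together end exactly at the 0xffff0000 vector page.
 */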
/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	@ enable write buffer coalescing; some bootloaders disable it
	mrc	p15, 0, r1, c1, c0, 1
	bic	r1, r1, #1
	mcr	p15, 0, r1, c1, c0, 1
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we always enter idle mode, whatever the case.
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
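/*
 * Cost trade-off behind MAX_AREA_SIZE (a rough sketch): flushing a
 * range line by line costs roughly (end - start) / CACHELINESIZE
 * cache operations, i.e. 32768 / 32 = 1024 D-cache lines for a 32 KiB
 * range.  Beyond that, allocating the whole 32 KiB D-cache through
 * __flush_whole_cache is no more expensive, hence the bhs fallback in
 * xscale_flush_user_cache_range below.
 */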
/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- vm_flags - vma->vm_flags describing the address space
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(xscale_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
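/*
 * Example of the partial-line handling in xscale_dma_inv_range below
 * (a sketch with made-up addresses): for start = 0x1004, end = 0x2010,
 * the line containing 0x1000 and the line containing 0x2000 both hold
 * data outside the buffer, so both are cleaned first; the invalidate
 * loop then runs over the aligned range [0x1000, 0x2010).
 */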
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
xscale_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
xscale_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xscale_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range		@ DMA_TO_DEVICE
	bcs	xscale_dma_inv_range		@ DMA_FROM_DEVICE
	b	xscale_dma_flush_range		@ DMA_BIDIRECTIONAL
ENDPROC(xscale_dma_map_area)

/*
 * dma_map_area(start, size, dir) - 80200 A0/A1 workaround variant;
 * see the erratum #25 note further below.
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xscale_dma_a0_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	b	xscale_dma_flush_range
ENDPROC(xscale_dma_a0_map_area)

/*
 * dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xscale_dma_unmap_area)
	mov	pc, lr
ENDPROC(xscale_dma_unmap_area)

ENTRY(xscale_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_area
	.long	xscale_dma_map_area
	.long	xscale_dma_unmap_area
	.long	xscale_dma_flush_range
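/*
 * Note: the slot order of the table above and of
 * xscale_80200_A0_A1_cache_fns below is expected to match
 * struct cpu_cache_fns (arch/arm/include/asm/cacheflush.h); these
 * functions are only ever reached through that structure.
 */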
/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 * http://www.intel.com/design/iio/specupdt/273415.htm
 */
ENTRY(xscale_80200_A0_A1_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_area
	.long	xscale_dma_a0_map_area
	.long	xscale_dma_unmap_area
	.long	xscale_dma_flush_range

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Erratum 40: must set memory to write-through for user read-only pages.
 */
cpu_xscale_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_BUFFERABLE					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE			@ L_PTE_MT_MINICACHE
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	0x00						@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	xscale_set_pte_ext_prologue

	@
	@ Erratum 40: must set memory to write-through for user read-only pages
	@
	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2)
	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER

	moveq	r1, #L_PTE_MT_WRITETHROUGH
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xscale_mt_table
	ldr	ip, [ip, r1]
	bic	r2, r2, #0x0c
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	mov	pc, lr


	.ltorg

	.align

	__INIT

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ it's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr
	.size	__xscale_setup, . - __xscale_setup
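/*
 * How xscale_crval below is consumed (a worked sketch): __xscale_setup
 * returns (old_ctrl & ~clear) | mmuset in r0, so clear = 0x00003b07
 * forces the V, I, Z, R, S, C, A and M bits to a known state, and
 * mmuset = 0x00003905 then sets V, I, Z, S, C and M while R and A stay
 * cleared; this is exactly the bit map shown in the comment below.
 */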
	/*
	 *			  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 .... .101
	 *
	 */
	.type	xscale_crval, #object
xscale_crval:
	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	v5t_early_abort
	.word	legacy_pabort
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle
	.word	cpu_xscale_dcache_clean_area
	.word	cpu_xscale_switch_mm
	.word	cpu_xscale_set_pte_ext
	.size	xscale_processor_functions, . - xscale_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_80200_A0_A1_name, #object
cpu_80200_A0_A1_name:
	.asciz	"XScale-80200 A0/A1"
	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name

	.type	cpu_80200_name, #object
cpu_80200_name:
	.asciz	"XScale-80200"
	.size	cpu_80200_name, . - cpu_80200_name

	.type	cpu_80219_name, #object
cpu_80219_name:
	.asciz	"XScale-80219"
	.size	cpu_80219_name, . - cpu_80219_name

	.type	cpu_8032x_name, #object
cpu_8032x_name:
	.asciz	"XScale-IOP8032x Family"
	.size	cpu_8032x_name, . - cpu_8032x_name

	.type	cpu_8033x_name, #object
cpu_8033x_name:
	.asciz	"XScale-IOP8033x Family"
	.size	cpu_8033x_name, . - cpu_8033x_name

	.type	cpu_pxa250_name, #object
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.size	cpu_pxa250_name, . - cpu_pxa250_name

	.type	cpu_pxa210_name, #object
cpu_pxa210_name:
	.asciz	"XScale-PXA210"
	.size	cpu_pxa210_name, . - cpu_pxa210_name

	.type	cpu_ixp42x_name, #object
cpu_ixp42x_name:
	.asciz	"XScale-IXP42x Family"
	.size	cpu_ixp42x_name, . - cpu_ixp42x_name

	.type	cpu_ixp43x_name, #object
cpu_ixp43x_name:
	.asciz	"XScale-IXP43x Family"
	.size	cpu_ixp43x_name, . - cpu_ixp43x_name

	.type	cpu_ixp46x_name, #object
cpu_ixp46x_name:
	.asciz	"XScale-IXP46x Family"
	.size	cpu_ixp46x_name, . - cpu_ixp46x_name

	.type	cpu_ixp2400_name, #object
cpu_ixp2400_name:
	.asciz	"XScale-IXP2400"
	.size	cpu_ixp2400_name, . - cpu_ixp2400_name

	.type	cpu_ixp2800_name, #object
cpu_ixp2800_name:
	.asciz	"XScale-IXP2800"
	.size	cpu_ixp2800_name, . - cpu_ixp2800_name

	.type	cpu_pxa255_name, #object
cpu_pxa255_name:
	.asciz	"XScale-PXA255"
	.size	cpu_pxa255_name, . - cpu_pxa255_name

	.type	cpu_pxa270_name, #object
cpu_pxa270_name:
	.asciz	"XScale-PXA270"
	.size	cpu_pxa270_name, . - cpu_pxa270_name
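/*
 * How the .proc.info.init records below are selected (a sketch of the
 * generic mechanism): early boot code reads the CPU ID from CP15 c0
 * and picks the first record whose masked ID matches, i.e.
 *
 *	(id & cpu_mask) == cpu_val
 *
 * where cpu_val and cpu_mask are the first two .long words of each
 * record.  For example, a CPU reporting ID 0x69052d05 matches the
 * PXA255 record further below, since
 * (0x69052d05 & 0xfffffff0) == 0x69052d00.
 */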
	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__80200_A0_A1_proc_info,#object
__80200_A0_A1_proc_info:
	.long	0x69052000
	.long	0xfffffffe
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_80200_A0_A1_cache_fns
	.size	__80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80200_proc_info, . - __80200_proc_info

	.type	__80219_proc_info,#object
__80219_proc_info:
	.long	0x69052e20
	.long	0xffffffe0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80219_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80219_proc_info, . - __80219_proc_info

	.type	__8032x_proc_info,#object
__8032x_proc_info:
	.long	0x69052420
	.long	0xfffff7e0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8032x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8032x_proc_info, . - __8032x_proc_info

	.type	__8033x_proc_info,#object
__8033x_proc_info:
	.long	0x69054010
	.long	0xfffffd30
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8033x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8033x_proc_info, . - __8033x_proc_info
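/*
 * A note on the wider masks used above (worked example): the 8032x
 * record matches (id & 0xfffff7e0) == 0x69052420, i.e. bits 4-0 and
 * bit 11 of the ID are ignored, so both the 0x69052420-0x6905243f and
 * the 0x69052c20-0x69052c3f ID ranges select __8032x_proc_info.
 */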
	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100
	.long	0xfffff7f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa250_proc_info, . - __pxa250_proc_info

	.type	__pxa210_proc_info,#object
__pxa210_proc_info:
	.long	0x69052120
	.long	0xfffff3f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa210_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa210_proc_info, . - __pxa210_proc_info

	.type	__ixp2400_proc_info, #object
__ixp2400_proc_info:
	.long	0x69054190
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2400_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2400_proc_info, . - __ixp2400_proc_info

	.type	__ixp2800_proc_info, #object
__ixp2800_proc_info:
	.long	0x690541a0
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2800_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2800_proc_info, . - __ixp2800_proc_info

	.type	__ixp42x_proc_info, #object
__ixp42x_proc_info:
	.long	0x690541c0
	.long	0xffffffc0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp42x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp42x_proc_info, . - __ixp42x_proc_info
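/*
 * A note on the "b __xscale_setup" slot used by every record here:
 * the setup hook is stored as a branch instruction rather than as a
 * .long address so that the boot code can invoke it PC-relatively,
 * before the MMU is enabled and while still running from physical
 * addresses.
 */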
	.type	__ixp43x_proc_info, #object
__ixp43x_proc_info:
	.long	0x69054040
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp43x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp43x_proc_info, . - __ixp43x_proc_info

	.type	__ixp46x_proc_info, #object
__ixp46x_proc_info:
	.long	0x69054200
	.long	0xffffff00
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp46x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp46x_proc_info, . - __ixp46x_proc_info

	.type	__pxa255_proc_info,#object
__pxa255_proc_info:
	.long	0x69052d00
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa255_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa255_proc_info, . - __pxa255_proc_info

	.type	__pxa270_proc_info,#object
__pxa270_proc_info:
	.long	0x69054110
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa270_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa270_proc_info, . - __pxa270_proc_info