/*
 * arch/xtensa/mm/misc.S
 *
 * Miscellaneous assembly functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */


#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/tlbflush.h>


/*
 * clear_page and clear_user_page are the same for non-cache-aliased configs.
 *
 * clear_page (unsigned long page)
 *                    a2
 *
 * Zeroes one PAGE_SIZE page at a2.  The store loop is unrolled to write
 * 32 bytes (eight 32-bit words) per iteration; the __loopi/__endla macro
 * pair hides whether the core has the zero-overhead loop option.
 * Clobbers: a3 (zero value), a7 (loop counter).
 */

ENTRY(clear_page)

	abi_entry_default

	movi	a3, 0				/* value to store */
	__loopi	a2, a7, PAGE_SIZE, 32		/* a7 = trip count, 32 B/iter */
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32			/* a2 += 32 each pass */

	abi_ret_default

ENDPROC(clear_page)
EXPORT_SYMBOL(clear_page)

/*
 * copy_page and copy_user_page are the same for non-cache-aliased configs.
 *
 * copy_page (void *to, void *from)
 *                  a2       a3
 *
 * Copies one PAGE_SIZE page, 32 bytes per iteration, using a8/a9 as a
 * two-word load/store staging pair.  Clobbers: a4 (loop counter), a8, a9.
 */

ENTRY(copy_page)

	abi_entry_default

	__loopi	a2, a4, PAGE_SIZE, 32

	l32i	a8, a3, 0
	l32i	a9, a3, 4
	s32i	a8, a2, 0
	s32i	a9, a2, 4

	l32i	a8, a3, 8
	l32i	a9, a3, 12
	s32i	a8, a2, 8
	s32i	a9, a2, 12

	l32i	a8, a3, 16
	l32i	a9, a3, 20
	s32i	a8, a2, 16
	s32i	a9, a2, 20

	l32i	a8, a3, 24
	l32i	a9, a3, 28
	s32i	a8, a2, 24
	s32i	a9, a2, 28

	addi	a2, a2, 32			/* advance destination */
	addi	a3, a3, 32			/* advance source */

	__endl	a2, a4

	abi_ret_default

ENDPROC(copy_page)
EXPORT_SYMBOL(copy_page)

#ifdef CONFIG_MMU
/*
 * If we have to deal with cache aliasing, we use temporary memory mappings
 * to ensure that the source and destination pages have the same color as
 * the virtual address. We use way 0 and 1 for temporary mappings in such cases.
 *
 * The temporary DTLB entries shouldn't be flushed by interrupts, but are
 * flushed by preemptive task switches. Special code in the
 * fast_second_level_miss handler re-establishes the temporary mapping.
 * It requires that the PPNs for the destination and source addresses are
 * in a6, and a7, respectively.
 */

/* TLB miss exceptions are treated special in the following region */

ENTRY(__tlbtemp_mapping_start)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * clear_page_alias(void *addr, unsigned long paddr)
 *                     a2              a3
 *
 * Like clear_page(), but installs a temporary DTLB entry mapping the
 * color-matched virtual address a2 to the physical page a3 first, and
 * removes it again afterwards.  a6 must keep the PTE value so that
 * fast_second_level_miss can re-establish the mapping (see header
 * comment above).
 */

ENTRY(clear_page_alias)

	abi_entry_default

	/* NOTE(review): a5 appears unused in this function — presumably a
	 * leftover; confirm before removing. */
	movi	a5, PAGE_OFFSET
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)	/* a6 = dest PTE */
	mov	a4, a2				/* remember vaddr for idtlb */
	wdtlb	a6, a2				/* temporary DTLB entry, way 0 */
	dsync					/* make the mapping visible */

	movi	a3, 0				/* value to store */
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	/* We need to invalidate the temporary dtlb entry. */

	idtlb	a4
	dsync

	abi_ret_default

ENDPROC(clear_page_alias)

/*
 * copy_page_alias(void *to, void *from,
 *                     a2        a3
 *                 unsigned long to_paddr, unsigned long from_paddr)
 *                               a4                      a5
 *
 * Like copy_page(), but maps destination and source through temporary
 * DTLB entries (destination in way 0, source in way 1) so both accesses
 * use the cache color of the given virtual addresses.  a6/a7 must hold
 * the destination/source PTEs for fast_second_level_miss (see header
 * comment above).
 */

ENTRY(copy_page_alias)

	abi_entry_default

	/* Setup a temporary DTLB for destination. */

	addi	a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE)	/* a6 = dest PTE */
	wdtlb	a6, a2				/* way 0, vaddr a2 */
	dsync

	/* Setup a temporary DTLB for source. */

	addi	a7, a5, PAGE_KERNEL		/* a7 = source PTE (read-only) */
	addi	a8, a3, 1			# way1

	wdtlb	a7, a8
	dsync

	/* NOTE(review): label 1: has no visible user in this file —
	 * presumably referenced by the TLB miss handler; confirm. */
1:	__loopi	a2, a4, PAGE_SIZE, 32

	l32i	a8, a3, 0
	l32i	a9, a3, 4
	s32i	a8, a2, 0
	s32i	a9, a2, 4

	l32i	a8, a3, 8
	l32i	a9, a3, 12
	s32i	a8, a2, 8
	s32i	a9, a2, 12

	l32i	a8, a3, 16
	l32i	a9, a3, 20
	s32i	a8, a2, 16
	s32i	a9, a2, 20

	l32i	a8, a3, 24
	l32i	a9, a3, 28
	s32i	a8, a2, 24
	s32i	a9, a2, 28

	addi	a2, a2, 32
	addi	a3, a3, 32

	__endl	a2, a4

	/* We need to invalidate any temporary mapping! */

	/* a2/a3 advanced one full page; step back into the mapped range.
	 * a3 also gets the +1 way-1 bit back (it was entered as a3+1). */
	addi	a2, a2, -PAGE_SIZE
	idtlb	a2
	dsync

	addi	a3, a3, -PAGE_SIZE+1
	idtlb	a3
	dsync

	abi_ret_default

ENDPROC(copy_page_alias)

#endif

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * void __flush_invalidate_dcache_page_alias (addr, phys)
 *                                             a2    a3
 *
 * Writes back and invalidates the d-cache lines of one page through a
 * temporary DTLB mapping of phys at the aliased address addr.
 */

ENTRY(__flush_invalidate_dcache_page_alias)

	abi_entry_default

	movi	a7, 0				# required for exception handler
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)	/* a6 = PTE */
	mov	a4, a2				/* remember vaddr for idtlb */
	wdtlb	a6, a2
	dsync

	___flush_invalidate_dcache_page a2 a3	/* a3 is macro scratch */

	idtlb	a4				/* drop temporary mapping */
	dsync

	abi_ret_default

ENDPROC(__flush_invalidate_dcache_page_alias)

/*
 * void __invalidate_dcache_page_alias (addr, phys)
 *                                       a2    a3
 *
 * Invalidates (without writeback) the d-cache lines of one page through
 * a temporary DTLB mapping of phys at the aliased address addr.
 */

ENTRY(__invalidate_dcache_page_alias)

	abi_entry_default

	movi	a7, 0				# required for exception handler
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)	/* a6 = PTE */
	mov	a4, a2				/* remember vaddr for idtlb */
	wdtlb	a6, a2
	dsync

	___invalidate_dcache_page a2 a3		/* a3 is macro scratch */

	idtlb	a4				/* drop temporary mapping */
	dsync

	abi_ret_default

ENDPROC(__invalidate_dcache_page_alias)
#endif

ENTRY(__tlbtemp_mapping_itlb)

#if (ICACHE_WAY_SIZE > PAGE_SIZE)

/*
 * void __invalidate_icache_page_alias (addr, phys)
 *                                       a2    a3
 *
 * Invalidates the i-cache lines of one page through a temporary ITLB
 * mapping of phys at the aliased address addr.
 */

ENTRY(__invalidate_icache_page_alias)

	abi_entry_default

	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)	/* a6 = PTE */
	mov	a4, a2				/* remember vaddr for iitlb */
	witlb	a6, a2				/* temporary ITLB entry */
	isync

	___invalidate_icache_page a2 a3		/* a3 is macro scratch */

	iitlb	a4				/* drop temporary mapping */
	isync
	abi_ret_default

ENDPROC(__invalidate_icache_page_alias)

#endif

/* End of special treatment in tlb miss exception */

ENTRY(__tlbtemp_mapping_end)

#endif /* CONFIG_MMU */

/*
 * void __invalidate_icache_page(ulong start)
 */

ENTRY(__invalidate_icache_page)

	abi_entry_default

	___invalidate_icache_page a2 a3		/* a3 is macro scratch */
	isync

	abi_ret_default

ENDPROC(__invalidate_icache_page)

/*
 * void __invalidate_dcache_page(ulong start)
 */

ENTRY(__invalidate_dcache_page)

	abi_entry_default

	___invalidate_dcache_page a2 a3		/* a3 is macro scratch */
	dsync

	abi_ret_default

ENDPROC(__invalidate_dcache_page)

/*
 * void __flush_invalidate_dcache_page(ulong start)
 */

ENTRY(__flush_invalidate_dcache_page)

	abi_entry_default

	___flush_invalidate_dcache_page a2 a3	/* a3 is macro scratch */

	dsync
	abi_ret_default

ENDPROC(__flush_invalidate_dcache_page)

/*
 * void __flush_dcache_page(ulong start)
 */

ENTRY(__flush_dcache_page)

	abi_entry_default

	___flush_dcache_page a2 a3		/* a3 is macro scratch */

	dsync
	abi_ret_default

ENDPROC(__flush_dcache_page)

/*
 * void __invalidate_icache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_icache_range)

	abi_entry_default

	___invalidate_icache_range a2 a3 a4	/* a4 is macro scratch */
	isync

	abi_ret_default

ENDPROC(__invalidate_icache_range)
EXPORT_SYMBOL(__invalidate_icache_range)

/*
 * void __flush_invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_invalidate_dcache_range)

	abi_entry_default

	___flush_invalidate_dcache_range a2 a3 a4	/* a4 is macro scratch */
	dsync

	abi_ret_default

ENDPROC(__flush_invalidate_dcache_range)

/*
 * void __flush_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_dcache_range)

	abi_entry_default

	___flush_dcache_range a2 a3 a4		/* a4 is macro scratch */

	dsync

	abi_ret_default

ENDPROC(__flush_dcache_range)
EXPORT_SYMBOL(__flush_dcache_range)

/*
 * void __invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_dcache_range)

	abi_entry_default

	/* NOTE(review): no dsync here, unlike the other d-cache ops —
	 * confirm this is intentional. */
	___invalidate_dcache_range a2 a3 a4	/* a4 is macro scratch */

	abi_ret_default

ENDPROC(__invalidate_dcache_range)
EXPORT_SYMBOL(__invalidate_dcache_range)

/*
 * void __invalidate_icache_all(void)
 */

ENTRY(__invalidate_icache_all)

	abi_entry_default

	___invalidate_icache_all a2 a3		/* a2/a3 are macro scratch */
	isync

	abi_ret_default

ENDPROC(__invalidate_icache_all)

/*
 * void __flush_invalidate_dcache_all(void)
 */

ENTRY(__flush_invalidate_dcache_all)

	abi_entry_default

	___flush_invalidate_dcache_all a2 a3	/* a2/a3 are macro scratch */
	dsync

	abi_ret_default

ENDPROC(__flush_invalidate_dcache_all)

/*
 * void __invalidate_dcache_all(void)
 */

ENTRY(__invalidate_dcache_all)

	abi_entry_default

	___invalidate_dcache_all a2 a3		/* a2/a3 are macro scratch */
	dsync

	abi_ret_default

ENDPROC(__invalidate_dcache_all)