/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */

/*
 * Hack to resolve longstanding prefetch issue
 *
 * Prefetching may be fatal on some systems if we're prefetching beyond the
 * end of memory.  It's also a seriously bad idea on non dma-coherent systems.
 */
#ifdef CONFIG_DMA_NONCOHERENT
#undef CONFIG_CPU_HAS_PREFETCH
#endif
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#define dst a0
#define src a1
#define len a2

/*
 * Spec
 *
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * It assumes that
 *   - src and dst don't overlap
 *   - src is readable
 *   - dst is writable
 * memcpy uses the standard calling convention
 *
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   copy_to_user
 *     - src is readable  (no exceptions when reading src)
 *   copy_from_user
 *     - dst is writable  (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * include/asm-mips/uaccess.h
 *
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 */

/*
 * Implementation
 */

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
 */
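/*
 * Worked example of the invariants above (hypothetical numbers): say
 * __copy_user is entered with src_entry = 0x1000, dst_entry = 0x2000 and
 * len = 0x100, so uaccess.h sets AT = 0x1100 per (1).  If a load later
 * faults and THREAD_BUADDR reports 0x1040 as the first uncopied source
 * address, the handler in .Ll_exc below recovers
 *	len = AT - 0x1040 = 0xc0	uncopied bytes
 *	dst += 0x1040 - src		first destination byte to clear
 * which is exactly why (2) and (3) must hold at every fault site.
 */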
/* Instruction type */
#define LD_INSN 1
#define ST_INSN 2
/* Prefetch type */
#define SRC_PREFETCH 1
#define DST_PREFETCH 2
#define LEGACY_MODE 1
#define EVA_MODE    2
#define USEROP   1
#define KERNELOP 2

/*
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 * Arguments:
 * insn    : Load/store instruction
 * type    : Instruction type
 * reg     : Register
 * addr    : Address
 * handler : Exception handler
 */

#define EXC(insn, type, reg, addr, handler)			\
	.if \mode == LEGACY_MODE;				\
9:		insn reg, addr;					\
		.section __ex_table,"a";			\
		PTR	9b, handler;				\
		.previous;					\
	/* This is assembled in EVA mode */			\
	.else;							\
		/* If loading from user or storing to user */	\
		.if ((\from == USEROP) && (type == LD_INSN)) || \
		    ((\to == USEROP) && (type == ST_INSN));	\
9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
			.section __ex_table,"a";		\
			PTR	9b, handler;			\
			.previous;				\
		.else;						\
			/*					\
			 * Still in EVA, but no need for	\
			 * exception handler or EVA insn	\
			 */					\
			insn reg, addr;				\
		.endif;						\
	.endif
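/*
 * Example (illustrative, not generated output): in LEGACY_MODE the use
 *	EXC(lw, LD_INSN, t0, 0(src), .Ll_exc)
 * assembles roughly to
 *	9:	lw	t0, 0(src)
 * plus an __ex_table entry pairing label 9b with .Ll_exc, so a fault on
 * the lw transfers control to the handler instead of oopsing the kernel.
 */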
/*
 * Only the 64-bit kernel can make use of 64-bit registers.
 */
#ifdef CONFIG_64BIT
#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOADK ld /* No exception */
#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SRA    dsra
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

/*
 * As we are sharing code with the mips32 tree (which uses the o32 ABI
 * register definitions), we need to redefine the register definitions
 * from the n64 ABI register naming to the o32 ABI register naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#else

#define LOADK lw /* No exception */
#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
#define ADD    addu
#define SUB    subu
#define SRL    srl
#define SLL    sll
#define SRA    sra
#define SLLV   sllv
#define SRLV   srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#define LOADB(reg, addr, handler)	EXC(lb, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)

#define _PREF(hint, addr, type)						\
	.if \mode == LEGACY_MODE;					\
		PREF(hint, addr);					\
	.else;								\
		.if ((\from == USEROP) && (type == SRC_PREFETCH)) ||	\
		    ((\to == USEROP) && (type == DST_PREFETCH));	\
			/*						\
			 * PREFE has only 9 bits for the offset		\
			 * compared to PREF which has 16, so it may	\
			 * need to use the $at register but this	\
			 * register should remain intact because it's	\
			 * used later on.  Therefore use $v1.		\
			 */						\
			.set at=v1;					\
			PREFE(hint, addr);				\
			.set noat;					\
		.else;							\
			PREF(hint, addr);				\
		.endif;							\
	.endif

#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#endif
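/*
 * Illustrative note: LDFIRST/LDREST map to the MIPS unaligned-access
 * pairs (lwl/lwr here, ldl/ldr under USE_DOUBLE).  With a big-endian
 * 32-bit kernel and src = 0x1001, for example:
 *	LDFIRST(t0, FIRST(0)(src), h)	# lwl t0, 0(src) -> bytes 0x1001..0x1003
 *	LDREST(t0, REST(0)(src), h)	# lwr t0, 3(src) -> byte  0x1004
 * together assemble a full word in t0 without raising an address error.
 * The FIRST/REST offset helpers are defined just below.
 */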
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define UNIT(unit)  FIRST(unit)

#define ADDRMASK (NBYTES-1)

	.text
	.set	noreorder
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#else
	.set	at=v1
#endif

	.align	5

	/*
	 * Macro to build the __copy_user common code
	 * Arguments:
	 * mode : LEGACY_MODE or EVA_MODE
	 * from : Source operand. USEROP or KERNELOP
	 * to   : Destination operand. USEROP or KERNELOP
	 */
	.macro __BUILD_COPY_USER mode, from, to

	/* initialize __memcpy if this is the first time we execute this macro */
	.ifnotdef __memcpy
	.set __memcpy, 1
	.hidden __memcpy /* make sure it does not leak */
	.endif

	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
#define rem t8

	R10KCBARRIER(0(ra))
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	PREFS(	0, 0(src) )
	PREFD(	1, 0(dst) )
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	PREFS(	0, 1*32(src) )
	PREFD(	1, 1*32(dst) )
	bnez	t2, .Lcopy_bytes_checklen\@
	 and	t0, src, ADDRMASK
	PREFS(	0, 2*32(src) )
	PREFD(	1, 2*32(dst) )
	bnez	t1, .Ldst_unaligned\@
	 nop
	bnez	t0, .Lsrc_unaligned_dst_aligned\@
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned\@:
	 SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned\@	# len < 8*NBYTES
	 and	rem, len, (8*NBYTES-1)		# rem = len % (8*NBYTES)
	PREFS(	0, 3*32(src) )
	PREFD(	1, 3*32(dst) )
	.align	4
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 8*NBYTES
	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
	LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
	STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
	LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
	LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
	ADD	src, src, 8*NBYTES
	ADD	dst, dst, 8*NBYTES
	STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
	STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
	STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
	STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
	STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
	STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
	PREFS(	0, 8*32(src) )
	PREFD(	1, 8*32(dst) )
	bne	len, rem, 1b
	 nop

	/*
	 * len == rem == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned\@:
	beqz	len, .Ldone\@
	 sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units\@
	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
	LOAD( t0, UNIT(0)(src), .Ll_exc\@)
	LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
	R10KCBARRIER(0(ra))
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone\@
	.set	noreorder
.Lless_than_4units\@:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes\@
	 nop
1:
	R10KCBARRIER(0(ra))
	LOAD(t0, 0(src), .Ll_exc\@)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder

	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because can't assume read-access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
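	/*
	 * Worked example of the trick above (hypothetical values): on a
	 * big-endian 64-bit kernel with len == rem == 3,
	 *	rem  = len << 3        = 24 bits to keep
	 *	bits = 8*NBYTES - rem  = 40 bits to discard
	 * A full word is loaded from src, SHIFT_DISCARD (dsrlv here)
	 * drops the 40 unwanted bits, and STREST (sdr here) at dst+len-1
	 * stores only the 3 surviving bytes, without ever reading dst.
	 */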
#define bits t2
	beqz	len, .Ldone\@
	 ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
	LOAD(t0, 0(src), .Ll_exc\@)
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
	STREST(t0, -1(t1), .Ls_exc\@)
	jr	ra
	 move	len, zero
.Ldst_unaligned\@:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; t1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
#define match rem
	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
	ADD	t2, zero, NBYTES
	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
	R10KCBARRIER(0(ra))
	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
	beq	len, t2, .Ldone\@
	 SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned\@
	 ADD	src, src, t2

.Lsrc_unaligned_dst_aligned\@:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	PREFS(	0, 3*32(src) )
	beqz	t0, .Lcleanup_src_unaligned\@
	 and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
	PREFD(	1, 3*32(dst) )
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
	PREFS(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
#endif
	STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
	PREFD(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcleanup_src_unaligned\@:
	beqz	len, .Ldone\@
	 and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes\@
	 nop
1:
	R10KCBARRIER(0(ra))
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst), .Ls_exc_p1u\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcopy_bytes_checklen\@:
	beqz	len, .Ldone\@
	 nop
.Lcopy_bytes\@:
	/* 0 < len < NBYTES */
	R10KCBARRIER(0(ra))
#define COPY_BYTE(N)			\
	LOADB(t0, N(src), .Ll_exc\@);	\
	SUB	len, len, 1;		\
	beqz	len, .Ldone\@;		\
	STOREB(t0, N(dst), .Ls_exc_p1\@)

	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
	LOADB(t0, NBYTES-2(src), .Ll_exc\@)
	SUB	len, len, 1
	jr	ra
	 STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
.Ldone\@:
	jr	ra
	 nop
	.if __memcpy == 1
	END(memcpy)
	.set __memcpy, 0
	.hidden __memcpy
	.endif

.Ll_exc_copy\@:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOADK	t0, TI_TASK($28)
	 nop
	LOADK	t0, THREAD_BUADDR(t0)
1:
	LOADB(t1, 0(src), .Ll_exc\@)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc\@:
	LOADK	t0, TI_TASK($28)
	 nop
	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	 nop
	SUB	len, AT, t0		# len = number of uncopied bytes
	bnez	t6, .Ldone\@		/* Skip the zeroing part if inatomic */
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 * see (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
	 ADD	dst, t0			# compute start address in a0
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder				/* DADDI_WAR */
	SUB	src, len, 1
	beqz	len, .Ldone\@
	.set	noreorder
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	bnez	src, 1b
	 SUB	src, src, 1
#else
	.set	push
	.set	noat
	li	v1, 1
	bnez	src, 1b
	 SUB	src, src, v1
	.set	pop
#endif
	jr	ra
	 nop


#define SEXC(n)							\
	.set	reorder;			/* DADDI_WAR */	\
.Ls_exc_p ## n ## u\@:						\
	ADD	len, len, n*NBYTES;				\
	jr	ra;						\
	.set	noreorder

SEXC(8)
SEXC(7)
SEXC(6)
SEXC(5)
SEXC(4)
SEXC(3)
SEXC(2)
SEXC(1)

.Ls_exc_p1\@:
	.set	reorder				/* DADDI_WAR */
	ADD	len, len, 1
	jr	ra
	.set	noreorder
.Ls_exc\@:
	jr	ra
	 nop
	.endm
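/*
 * Note on the overlap test below (illustrative values): memmove falls
 * back to memcpy unless the regions truly overlap, i.e. unless both
 *	src < dst + len   and   dst < src + len
 * hold.  For dst = 0x2000, src = 0x1000, len = 0x100 the second test
 * fails (0x2000 < 0x1100 is false), so the copy is forwarded to memcpy;
 * only genuinely overlapping buffers take the byte-wise __rmemcpy path.
 */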
552 */ 553 .set reorder /* DADDI_WAR */ 554 SUB src, len, 1 555 beqz len, .Ldone\@ 556 .set noreorder 5571: sb zero, 0(dst) 558 ADD dst, dst, 1 559#ifndef CONFIG_CPU_DADDI_WORKAROUNDS 560 bnez src, 1b 561 SUB src, src, 1 562#else 563 .set push 564 .set noat 565 li v1, 1 566 bnez src, 1b 567 SUB src, src, v1 568 .set pop 569#endif 570 jr ra 571 nop 572 573 574#define SEXC(n) \ 575 .set reorder; /* DADDI_WAR */ \ 576.Ls_exc_p ## n ## u\@: \ 577 ADD len, len, n*NBYTES; \ 578 jr ra; \ 579 .set noreorder 580 581SEXC(8) 582SEXC(7) 583SEXC(6) 584SEXC(5) 585SEXC(4) 586SEXC(3) 587SEXC(2) 588SEXC(1) 589 590.Ls_exc_p1\@: 591 .set reorder /* DADDI_WAR */ 592 ADD len, len, 1 593 jr ra 594 .set noreorder 595.Ls_exc\@: 596 jr ra 597 nop 598 .endm 599 600 .align 5 601LEAF(memmove) 602 ADD t0, a0, a2 603 ADD t1, a1, a2 604 sltu t0, a1, t0 # dst + len <= src -> memcpy 605 sltu t1, a0, t1 # dst >= src + len -> memcpy 606 and t0, t1 607 beqz t0, .L__memcpy 608 move v0, a0 /* return value */ 609 beqz a2, .Lr_out 610 END(memmove) 611 612 /* fall through to __rmemcpy */ 613LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ 614 sltu t0, a1, a0 615 beqz t0, .Lr_end_bytes_up # src >= dst 616 nop 617 ADD a0, a2 # dst = dst + len 618 ADD a1, a2 # src = src + len 619 620.Lr_end_bytes: 621 R10KCBARRIER(0(ra)) 622 lb t0, -1(a1) 623 SUB a2, a2, 0x1 624 sb t0, -1(a0) 625 SUB a1, a1, 0x1 626 .set reorder /* DADDI_WAR */ 627 SUB a0, a0, 0x1 628 bnez a2, .Lr_end_bytes 629 .set noreorder 630 631.Lr_out: 632 jr ra 633 move a2, zero 634 635.Lr_end_bytes_up: 636 R10KCBARRIER(0(ra)) 637 lb t0, (a1) 638 SUB a2, a2, 0x1 639 sb t0, (a0) 640 ADD a1, a1, 0x1 641 .set reorder /* DADDI_WAR */ 642 ADD a0, a0, 0x1 643 bnez a2, .Lr_end_bytes_up 644 .set noreorder 645 646 jr ra 647 move a2, zero 648 END(__rmemcpy) 649 650/* 651 * t6 is used as a flag to note inatomic mode. 652 */ 653LEAF(__copy_user_inatomic) 654 b __copy_user_common 655 li t6, 1 656 END(__copy_user_inatomic) 657 658/* 659 * A combined memcpy/__copy_user 660 * __copy_user sets len to 0 for success; else to an upper bound of 661 * the number of uncopied bytes. 662 * memcpy sets v0 to dst. 663 */ 664 .align 5 665LEAF(memcpy) /* a0=dst a1=src a2=len */ 666 move v0, dst /* return value */ 667.L__memcpy: 668FEXPORT(__copy_user) 669 li t6, 0 /* not inatomic */ 670__copy_user_common: 671 /* Legacy Mode, user <-> user */ 672 __BUILD_COPY_USER LEGACY_MODE USEROP USEROP 673 674#ifdef CONFIG_EVA 675 676/* 677 * For EVA we need distinct symbols for reading and writing to user space. 678 * This is because we need to use specific EVA instructions to perform the 679 * virtual <-> physical translation when a virtual address is actually in user 680 * space 681 */ 682 683LEAF(__copy_user_inatomic_eva) 684 b __copy_from_user_common 685 li t6, 1 686 END(__copy_user_inatomic_eva) 687 688/* 689 * __copy_from_user (EVA) 690 */ 691 692LEAF(__copy_from_user_eva) 693 li t6, 0 /* not inatomic */ 694__copy_from_user_common: 695 __BUILD_COPY_USER EVA_MODE USEROP KERNELOP 696END(__copy_from_user_eva) 697 698 699 700/* 701 * __copy_to_user (EVA) 702 */ 703 704LEAF(__copy_to_user_eva) 705__BUILD_COPY_USER EVA_MODE KERNELOP USEROP 706END(__copy_to_user_eva) 707 708/* 709 * __copy_in_user (EVA) 710 */ 711 712LEAF(__copy_in_user_eva) 713__BUILD_COPY_USER EVA_MODE USEROP USEROP 714END(__copy_in_user_eva) 715 716#endif 717