/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf Electronics
 * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007, 2014 Maciej W. Rozycki
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/param.h>
#include <linux/smp.h>
#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/compiler.h>
#include <asm/war.h>

/*
 * Constraint used for the "1" decrement operand in __delay() below:
 * normally GCC may encode it as an immediate ("I"), but when
 * CONFIG_CPU_DADDI_WORKAROUNDS is set the immediate form is avoided by
 * forcing the constant into a register ("r") — presumably to dodge the
 * DADDI/DADDIU erratum the config option refers to; confirm against
 * the errata documentation.
 */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define GCC_DADDI_IMM_ASM() "I"
#else
#define GCC_DADDI_IMM_ASM() "r"
#endif

/* Platforms with CONFIG_HAVE_PLAT_DELAY supply their own implementations. */
#ifndef CONFIG_HAVE_PLAT_DELAY

/*
 * __delay - busy-wait for the given number of loop iterations.
 * @loops: number of passes through the two-instruction spin loop.
 *
 * The loop is a branch plus a LONG_SUBU (subu or dsubu depending on the
 * ABI, via __stringify) placed in the branch delay slot — hence
 * .set noreorder, so the assembler does not reschedule it.  The .align 3
 * aligns the loop head, presumably for stable per-iteration timing —
 * NOTE(review): confirm against the target microarchitecture.
 * Note the decrement executes once more after the final taken branch
 * test, which is the conventional delay-slot loop shape.
 */
void __delay(unsigned long loops)
{
	__asm__ __volatile__ (
	" .set noreorder \n"
	" .align 3 \n"
	"1: bnez %0, 1b \n"
	" " __stringify(LONG_SUBU) " %0, %1 \n"
	" .set reorder \n"
	: "=r" (loops)
	: GCC_DADDI_IMM_ASM() (1), "0" (loops));
}
EXPORT_SYMBOL(__delay);

/*
 * Division by multiplication: you don't have to worry about
 * loss of precision.
 *
 * Use only for very small delays ( < 1 msec). Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays. This is a "reasonable" implementation, though (and the
 * first constant multiplications gets optimized away if the delay is
 * a constant)
 */

/*
 * __udelay - busy-wait for approximately @us microseconds.
 * @us: delay in microseconds.
 *
 * 0x000010c7 = 4295 ~= 2^32 / 10^6 (rounded up), so the expression
 * computes roughly us * HZ * udelay_val / 10^6 using a multiply and a
 * 32-bit shift instead of a division.  udelay_val is read from
 * raw_current_cpu_data — presumably the calibrated loops-per-jiffy
 * value for the current CPU; verify against the MIPS cpuinfo setup.
 */
void __udelay(unsigned long us)
{
	unsigned int lpj = raw_current_cpu_data.udelay_val;

	__delay((us * 0x000010c7ull * HZ * lpj) >> 32);
}
EXPORT_SYMBOL(__udelay);

/*
 * __ndelay - busy-wait for approximately @ns nanoseconds.
 * @ns: delay in nanoseconds.
 *
 * 0x00000005 = 5 ~= 2^32 / 10^9 (rounded up), the nanosecond analogue
 * of the __udelay() scaling above; the rounding error here is larger
 * (5 vs ~4.295), biasing the delay slightly long, which is the safe
 * direction for a minimum-delay primitive.
 */
void __ndelay(unsigned long ns)
{
	unsigned int lpj = raw_current_cpu_data.udelay_val;

	__delay((ns * 0x00000005ull * HZ * lpj) >> 32);
}
EXPORT_SYMBOL(__ndelay);

#endif /* !CONFIG_HAVE_PLAT_DELAY */