/*
 * Divide a 64-bit unsigned number by a 32-bit unsigned number.
 * This routine assumes that the top 32 bits of the dividend are
 * non-zero to start with.
 * On entry, r3 points to the dividend, which gets overwritten with
 * the 64-bit quotient, and r4 contains the divisor.
 * On exit, r3 contains the remainder.
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
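/*
 * Viewed from C, this routine implements roughly the following
 * helper (an illustrative prototype in the style of the generic
 * do_div() machinery; not part of this file):
 *
 *	unsigned int __div64_32(unsigned long long *dividend,
 *				unsigned int divisor);
 *
 * where *dividend is overwritten with the quotient and the 32-bit
 * remainder is returned.
 */
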
#include "ppc_asm.h"

	.globl __div64_32
__div64_32:
	lwz	r5,0(r3)	# get the dividend into r5/r6
	lwz	r6,4(r3)
	cmplw	r5,r4
	li	r7,0
	li	r8,0
	blt	1f
	divwu	r7,r5,r4	# if dividend.hi >= divisor,
	mullw	r0,r7,r4	# quotient.hi = dividend.hi / divisor
	subf.	r5,r0,r5	# dividend.hi %= divisor
	beq	3f
1:	mr	r11,r5		# here dividend.hi != 0
	andis.	r0,r5,0xc000
	bne	2f
	cntlzw	r0,r5		# we are shifting the dividend right
	li	r10,-1		# to make it < 2^32, and shifting
	srw	r10,r10,r0	# the divisor right the same amount,
	addc	r9,r4,r10	# rounding up (so the estimate cannot
	andc	r11,r6,r10	# ever be too large, only too small)
	andc	r9,r9,r10
	addze	r9,r9
	or	r11,r5,r11
	rotlw	r9,r9,r0
	rotlw	r11,r11,r0
	divwu	r11,r11,r9	# then we divide the shifted quantities
2:	mullw	r10,r11,r4	# to get an estimate of the quotient,
	mulhwu	r9,r11,r4	# multiply the estimate by the divisor,
	subfc	r6,r10,r6	# subtract the product from the dividend,
	add	r8,r8,r11	# and add the estimate to the accumulated
	subfe.	r5,r9,r5	# quotient
	bne	1b
3:	cmplw	r6,r4
	blt	4f
	divwu	r0,r6,r4	# perform the remaining 32-bit division
	mullw	r10,r0,r4	# and get the remainder
	add	r8,r8,r0
	subf	r6,r10,r6
4:	stw	r7,0(r3)	# return the quotient in *r3
	stw	r8,4(r3)
	mr	r3,r6		# return the remainder in r3
	blr

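/*
 * For reference, the algorithm above can be modelled in C roughly
 * as follows.  This is an illustrative sketch, not part of the
 * build: the helper name is made up, GCC's __builtin_clz stands in
 * for cntlzw, and 64-bit arithmetic is used freely where the code
 * above works in 32-bit halves.
 *
 *	unsigned int div64_32_model(unsigned long long *np, unsigned int base)
 *	{
 *		unsigned int hi = *np >> 32, lo = *np, qhi = 0, qlo = 0;
 *
 *		if (hi >= base) {		// reduce the high word first,
 *			qhi = hi / base;	// so hi < base from here on
 *			hi %= base;
 *		}
 *		while (hi) {			// dividend still exceeds 32 bits
 *			unsigned int est;
 *
 *			if (hi & 0xc0000000) {	// top two bits set: hi itself
 *				est = hi;	// underestimates the quotient
 *			} else {
 *				unsigned int sh = __builtin_clz(hi);	// 2..31
 *				// shift dividend and divisor right by 32 - sh,
 *				// rounding the divisor up so the estimate can
 *				// never be too large, only too small
 *				unsigned int d = (hi << sh) | (lo >> (32 - sh));
 *				unsigned int b = (base >> (32 - sh)) +
 *					((base & ((1u << (32 - sh)) - 1)) != 0);
 *				est = d / b;
 *			}
 *			unsigned long long n = (((unsigned long long)hi << 32) | lo)
 *				- (unsigned long long)est * base;
 *			hi = n >> 32;
 *			lo = n;
 *			qlo += est;		// est >= 1, so this terminates
 *		}
 *		qlo += lo / base;		// final 32-bit division
 *		*np = ((unsigned long long)qhi << 32) | qlo;
 *		return lo % base;
 *	}
 */
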
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 holds the 64-bit value
 * R5    holds the shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
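/*
 * These routines are branch-free because the PowerPC 32-bit shift
 * instructions take a 6-bit count from the register: slw/srw produce
 * 0 for counts of 32-63, and sraw produces the sign fill.  In C,
 * where shifting a 32-bit value by 32 or more is undefined, the same
 * decomposition needs explicit branches.  An illustrative sketch of
 * the logical right shift (the helper name is made up):
 *
 *	unsigned long long lshrdi3_model(unsigned long long v,
 *					 unsigned int count)	// 0..63
 *	{
 *		unsigned int msw = v >> 32, lsw = v;
 *
 *		if (count == 0)
 *			return v;
 *		if (count < 32)
 *			return ((unsigned long long)(msw >> count) << 32)
 *				| (lsw >> count) | (msw << (32 - count));
 *		return msw >> (count - 32);	// only the MSW survives
 *	}
 *
 * __ashldi3 is the mirror image; __ashrdi3 additionally has to
 * propagate the sign bit, as noted below.
 */
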
	.globl __ashrdi3
__ashrdi3:
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
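
/*
 * The rlwinm/slw pair above has no counterpart in __lshrdi3 below:
 * for count < 32, sraw by count + 32 produces the sign fill
 * (MSW >> 31) rather than 0, so t2 must be cleared explicitly.
 * rlwinm isolates the bit of weight 32 in count + 32, giving
 * r8 = (count < 32) ? 32 : 0, and the slw by r8 zeroes t2 exactly
 * when count < 32.  A C sketch of the arithmetic shift (helper name
 * made up; assumes the usual arithmetic >> on signed integers):
 *
 *	long long ashrdi3_model(long long v, unsigned int count)	// 0..63
 *	{
 *		int msw = v >> 32;
 *		unsigned int lsw = v;
 *		unsigned long long hi, lo;
 *
 *		if (count == 0)
 *			return v;
 *		if (count < 32) {
 *			hi = (unsigned int)(msw >> count);
 *			lo = (lsw >> count) | ((unsigned int)msw << (32 - count));
 *		} else {
 *			hi = (unsigned int)(msw >> 31);		// sign fill
 *			lo = (unsigned int)(msw >> (count - 32));
 *		}
 *		return (long long)((hi << 32) | lo);
 *	}
 */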

	.globl __ashldi3
__ashldi3:
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

	.globl __lshrdi3
__lshrdi3:
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr