/* SPDX-License-Identifier: GPL-2.0 */
	.file	"shr_Xsig.S"
/*---------------------------------------------------------------------------+
 |  shr_Xsig.S                                                               |
 |                                                                           |
 | 12 byte right shift function                                              |
 |                                                                           |
 | Copyright (C) 1992,1994,1995                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void shr_Xsig(Xsig *arg, unsigned nr)                                   |
 |                                                                           |
 |   Extended shift right function.                                          |
 |   Fastest for small shifts.                                               |
 |   Shifts the 12 byte quantity pointed to by the first arg (arg)           |
 |   right by the number of bits specified by the second arg (nr).           |
 |                                                                           |
 +---------------------------------------------------------------------------*/
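/*
 * A rough C-equivalent sketch of this routine, for illustration only (not
 * part of the build).  It assumes Xsig is three 32-bit words, least
 * significant word first, with fields named lsw, midw and msw (see
 * fpu_emu.h), and uses the kernel's u32/u64 fixed-width types.
 *
 *	void shr_Xsig_sketch(Xsig *x, unsigned nr)
 *	{
 *		u64 lo = ((u64)x->midw << 32) | x->lsw;
 *		u64 hi = x->msw;
 *
 *		if (nr >= 96) {
 *			x->lsw = x->midw = x->msw = 0;
 *		} else if (nr >= 64) {
 *			x->lsw  = (u32)(hi >> (nr - 64));
 *			x->midw = 0;
 *			x->msw  = 0;
 *		} else if (nr >= 32) {
 *			u64 v = (lo >> 32) | (hi << 32);
 *
 *			v >>= nr - 32;
 *			x->lsw  = (u32)v;
 *			x->midw = (u32)(v >> 32);
 *			x->msw  = 0;
 *		} else if (nr) {
 *			x->lsw  = (u32)(lo >> nr);
 *			x->midw = (u32)((lo >> 32 >> nr) | (hi << (32 - nr)));
 *			x->msw  = (u32)(hi >> nr);
 *		}
 *	}
 */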

#include "fpu_emu.h"

.text
SYM_FUNC_START(shr_Xsig)
	push	%ebp
	movl	%esp,%ebp
	pushl	%esi
	movl	PARAM2,%ecx	/* nr: the shift count */
	movl	PARAM1,%esi	/* arg: pointer to the 12 byte Xsig */
	cmpl	$32,%ecx	/* shrd only works for 0..31 bits */
	jnc	L_more_than_31	/* nr >= 32 */

/* less than 32 bits */
	pushl	%ebx
	movl	(%esi),%eax	/* lsl */
	movl	4(%esi),%ebx	/* midl */
	movl	8(%esi),%edx	/* msl */
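/* shift each word right; shrd pulls the vacated top bits in from the next higher word */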
	shrd	%cl,%ebx,%eax
	shrd	%cl,%edx,%ebx
	shr	%cl,%edx
	movl	%eax,(%esi)
	movl	%ebx,4(%esi)
	movl	%edx,8(%esi)
	popl	%ebx
	popl	%esi
	leave
	RET

L_more_than_31:
	cmpl	$64,%ecx
	jnc	L_more_than_63	/* nr >= 64 */

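/* 32..63 bits: the lsl word is shifted out completely */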
	subb	$32,%cl
	movl	4(%esi),%eax	/* midl */
	movl	8(%esi),%edx	/* msl */
	shrd	%cl,%edx,%eax
	shr	%cl,%edx
	movl	%eax,(%esi)
	movl	%edx,4(%esi)
	movl	$0,8(%esi)
	popl	%esi
	leave
	RET

L_more_than_63:
	cmpl	$96,%ecx
	jnc	L_more_than_95	/* nr >= 96 */

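/* 64..95 bits: only bits from the msl word can survive */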
	subb	$64,%cl
	movl	8(%esi),%eax	/* msl */
	shr	%cl,%eax
	xorl	%edx,%edx
	movl	%eax,(%esi)
	movl	%edx,4(%esi)
	movl	%edx,8(%esi)
	popl	%esi
	leave
	RET

L_more_than_95:
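/* 96 bits or more: the result is zero */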
	xorl	%eax,%eax
	movl	%eax,(%esi)
	movl	%eax,4(%esi)
	movl	%eax,8(%esi)
	popl	%esi
	leave
	RET
SYM_FUNC_END(shr_Xsig)