// SPDX-License-Identifier: GPL-2.0-or-later
/*

   fp_arith.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines.

   Somewhat based on the AlphaLinux floating point emulator, by David
   Mosberger-Tang.

 */

#include "fp_emu.h"
#include "multi_arith.h"
#include "fp_arith.h"

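/* Canonical special values in the emulator's internal extended
   format: a quiet NaN has the maximum exponent (0x7fff) and an
   all-ones mantissa, infinity the maximum exponent and a zero
   mantissa. */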
const struct fp_ext fp_QNaN =
{
	.exp = 0x7fff,
	.mant = { .m64 = ~0 }
};

const struct fp_ext fp_Inf =
{
	.exp = 0x7fff,
};

/* let's start with the easy ones */

struct fp_ext *fp_fabs(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fabs\n");

	fp_monadic_check(dest, src);

	dest->sign = 0;

	return dest;
}

struct fp_ext *fp_fneg(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fneg\n");

	fp_monadic_check(dest, src);

	dest->sign = !dest->sign;

	return dest;
}

/* Now, the slightly harder ones */

/* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
   FDSUB, and FCMP instructions. */

struct fp_ext *fp_fadd(struct fp_ext *dest, struct fp_ext *src)
{
	int diff;

	dprint(PINSTR, "fadd\n");

	fp_dyadic_check(dest, src);

	if (IS_INF(dest)) {
		/* infinity - infinity == NaN */
		if (IS_INF(src) && (src->sign != dest->sign))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		fp_copy_ext(dest, src);
		return dest;
	}

	if (IS_ZERO(dest)) {
		if (IS_ZERO(src)) {
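			/* (+0) + (-0): the result is -0 only when
			   rounding toward minus infinity, +0 under
			   every other rounding mode. */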
			if (src->sign != dest->sign) {
				if (FPDATA->rnd == FPCR_ROUND_RM)
					dest->sign = 1;
				else
					dest->sign = 0;
			}
		} else
			fp_copy_ext(dest, src);
		return dest;
	}

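	/* Both operands are now finite and non-zero.  Clear the extra
	   low-order byte (it collects guard/sticky bits for rounding)
	   and line up the exponents: the operand with the smaller
	   exponent is shifted right to match the larger one, with the
	   shifted-out bits gathered in lowmant. */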
	dest->lowmant = src->lowmant = 0;

	if ((diff = dest->exp - src->exp) > 0)
		fp_denormalize(src, diff);
	else if ((diff = -diff) > 0)
		fp_denormalize(dest, diff);

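	/* Same signs: add the magnitudes; a carry out of the mantissa
	   is fixed up by fp_addcarry(), which bumps the exponent,
	   shifts the mantissa back down and reports overflow when the
	   exponent range is exhausted.  Opposite signs: subtract the
	   smaller magnitude from the larger and give the result the
	   sign of the larger operand. */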
	if (dest->sign == src->sign) {
		if (fp_addmant(dest, src))
			if (!fp_addcarry(dest))
				return dest;
	} else {
		if (dest->mant.m64 < src->mant.m64) {
			fp_submant(dest, src, dest);
			dest->sign = !dest->sign;
		} else
			fp_submant(dest, dest, src);
	}

	return dest;
}

/* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
   instructions.

   Remember that the arguments are in assembler-syntax order! */

struct fp_ext *fp_fsub(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fsub ");
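	/* dest - src is evaluated as dest + (-src): flip the sign of
	   the source operand and let fp_fadd() handle all the special
	   cases. */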
	src->sign = !src->sign;
	return fp_fadd(dest, src);
}


struct fp_ext *fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fcmp ");

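	/* The compare is done as a subtraction into a scratch operand,
	   so the destination register itself is never modified; the
	   condition codes are later derived from the value returned
	   here. */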
	FPDATA->temp[1] = *dest;
	src->sign = !src->sign;
	return fp_fadd(&FPDATA->temp[1], src);
}

struct fp_ext *fp_ftst(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "ftst\n");

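	/* ftst only looks at the source operand: the value is returned
	   unchanged and the condition codes are set from it by the
	   caller, so dest is deliberately unused. */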
	(void)dest;

	return src;
}

struct fp_ext *fp_fmul(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

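	/* Sum of the biased exponents, re-biased.  Using 0x3ffe (the
	   bias minus one) instead of the bias 0x3fff anticipates the
	   product of two mantissas in [1,2) falling into [2,4); the
	   normalization below decrements exp again when the product
	   stays below 2. */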
	exp = dest->exp + src->exp - 0x3ffe;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes the
	   shift of the result below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do a 64-bit multiply with expansion */
	fp_multiplymant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
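	/* If the top bit of the 128-bit product is clear (temp.m32[0]
	   is positive as a signed long), the mantissa product came out
	   below 2.0: shift everything left one bit and compensate in
	   the exponent. */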
	if ((long)temp.m32[0] > 0) {
		exp--;
		fp_putmant128(dest, &temp, 1);
	} else
		fp_putmant128(dest, &temp, 0);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, and FDDIV
   instructions (FSGLDIV has its own routine below).

   Note that the order of the operands is counter-intuitive: instead
   of src / dest, the result is actually dest / src. */

struct fp_ext *fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fdiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

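	/* Difference of the biased exponents, plus the bias 0x3fff.
	   The quotient of two mantissas in [1,2) lies in (0.5,2); the
	   normalization below subtracts one more from exp when the
	   quotient comes out below 1. */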
	exp = dest->exp - src->exp + 0x3fff;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes lots
	   of things below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do the 64-bit divide */
	fp_dividemant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if (!temp.m32[0]) {
		exp--;
		fp_putmant128(dest, &temp, 32);
	} else
		fp_putmant128(dest, &temp, 31);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

struct fp_ext *fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;

	dprint(PINSTR, "fsglmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

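	/* FSGLMUL works with single-precision mantissas: only the
	   upper 24 bits of each operand take part, so mask the rest
	   off before the 32x32->64 bit multiply. */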
	/* do a 32-bit multiply */
	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
		 dest->mant.m32[0] & 0xffffff00,
		 src->mant.m32[0] & 0xffffff00);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

struct fp_ext *fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;
	unsigned long quot, rem;

	dprint(PINSTR, "fsgldiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

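	/* FSGLDIV likewise works with single-precision mantissas:
	   truncate both operands to their upper 24 bits. */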
	dest->mant.m32[0] &= 0xffffff00;
	src->mant.m32[0] &= 0xffffff00;

	/* do the 32-bit divide */
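	/* If the dividend mantissa is at least as big as the divisor's,
	   the quotient lies in [1,2): divide out the leading one
	   separately (subtract the divisor once), divide the rest and
	   put the explicit leading bit back.  Otherwise the quotient
	   lies in [0.5,1) and the exponent has to be decremented. */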
	if (dest->mant.m32[0] >= src->mant.m32[0]) {
		fp_sub64(dest->mant, src->mant);
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
	} else {
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = quot;
		dest->mant.m32[1] = rem;		/* only for rounding */
		exp--;
	}

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/* fp_roundint: Internal rounding function for use by several of these
   emulated instructions.

   This one rounds off the fractional part using the rounding mode
   specified. */

static void fp_roundint(struct fp_ext *dest, int mode)
{
	union fp_mant64 oldmant;
	unsigned long mask;

	if (!fp_normalize_ext(dest))
		return;

	/* infinities and zeroes */
	if (IS_INF(dest) || IS_ZERO(dest))
		return;

	/* first truncate the lower bits */
	oldmant = dest->mant;
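	/* dest->exp is biased by 0x3fff.  An exponent of 0x3ffe or less
	   means |value| < 1, so the whole mantissa is fraction;
	   0x3fff..0x401e leaves 1 to 32 integer bits, all in the high
	   word; 0x401f..0x403e leaves 33 to 64 integer bits; anything
	   larger has no fraction bits left in the 64-bit mantissa. */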
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->mant.m64 = 0;
		break;
	case 0x3fff ... 0x401e:
		dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
		dest->mant.m32[1] = 0;
		if (oldmant.m64 == dest->mant.m64)
			return;
		break;
	case 0x401f ... 0x403e:
		dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
		if (oldmant.m32[1] == dest->mant.m32[1])
			return;
		break;
	default:
		return;
	}
	fp_set_sr(FPSR_EXC_INEX2);

	/* We might want to normalize upwards here... however, since
	   we know that this is only called on the output of fp_fdiv,
	   or with the input to fp_fint or fp_fintrz, and the inputs
	   to all these functions are either normal or denormalized
	   (no subnormals allowed!), there's really no need.

	   In the case of fp_fdiv, observe that 0x80000000 / 0xffff =
	   0xffff8000, and the same holds for 128-bit / 64-bit. (i.e. the
	   smallest possible normal dividend and the largest possible normal
	   divisor will still produce a normal quotient, therefore, (normal
	   << 64) / normal is normal in all cases) */

	switch (mode) {
	case FPCR_ROUND_RN:
		switch (dest->exp) {
		case 0 ... 0x3ffd:
			return;
		case 0x3ffe:
			/* As noted above, the input is always normal, so the
			   guard bit (bit 63) is always set.  Therefore, the
			   only case in which we will NOT round to 1.0 is when
			   the input is exactly 0.5. */
			if (oldmant.m64 == (1ULL << 63))
				return;
			break;
		case 0x3fff ... 0x401d:
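			/* Round to nearest, ties to even: 'mask' picks
			   out the first fraction bit.  Fraction below
			   one half -> truncate; integer LSB (mask << 1)
			   already odd -> round up; otherwise round up
			   only if some lower fraction bit is set, so an
			   exact half with an even integer part is left
			   alone. */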
			mask = 1 << (0x401d - dest->exp);
			if (!(oldmant.m32[0] & mask))
				return;
			if (oldmant.m32[0] & (mask << 1))
				break;
			if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
					!oldmant.m32[1])
				return;
			break;
		case 0x401e:
			if (oldmant.m32[1] & 0x80000000)
				return;
			if (oldmant.m32[0] & 1)
				break;
			if (!(oldmant.m32[1] << 1))
				return;
			break;
		case 0x401f ... 0x403d:
			mask = 1 << (0x403d - dest->exp);
			if (!(oldmant.m32[1] & mask))
				return;
			if (oldmant.m32[1] & (mask << 1))
				break;
			if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
				return;
			break;
		default:
			return;
		}
		break;
	case FPCR_ROUND_RZ:
		return;
	default:
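		/* Round toward minus or plus infinity.  With the FPCR
		   encoding, mode - FPCR_ROUND_RM is 0 for RM and 1 for
		   RP, so the magnitude is bumped exactly when the
		   rounding direction points away from zero for this
		   sign: negative values under RM, positive ones under
		   RP. */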
		if (dest->sign ^ (mode - FPCR_ROUND_RM))
			break;
		return;
	}

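	/* Round the magnitude up by one unit in the last integer place.
	   A carry out of the mantissa means the result is a power of
	   two: reset the mantissa to its normalized minimum and bump
	   the exponent. */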
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->exp = 0x3fff;
		dest->mant.m64 = 1ULL << 63;
		break;
	case 0x3fff ... 0x401e:
		mask = 1 << (0x401e - dest->exp);
		if (dest->mant.m32[0] += mask)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	case 0x401f ... 0x403e:
		mask = 1 << (0x403e - dest->exp);
		if (dest->mant.m32[1] += mask)
			break;
		if (dest->mant.m32[0] += 1)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	}
}

/* modrem_kernel: Implementation of the FREM and FMOD instructions
   (which are exactly the same, except for the rounding used on the
   intermediate value) */

static struct fp_ext *modrem_kernel(struct fp_ext *dest, struct fp_ext *src,
				    int mode)
{
	struct fp_ext tmp;

	fp_dyadic_check(dest, src);

	/* Infinities and zeros */
	if (IS_INF(dest) || IS_ZERO(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_ZERO(dest) || IS_INF(src))
		return dest;

	/* FIXME: there is almost certainly a smarter way to do this */
	fp_copy_ext(&tmp, dest);
	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */
	fp_roundint(&tmp, mode);
	fp_fmul(&tmp, src);
	fp_fsub(dest, &tmp);

	/* set the quotient byte */
	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
	return dest;
}

/* fp_fmod: Implements the kernel of the FMOD instruction.

   Again, the argument order is backwards.  The result, as defined in
   the Motorola manuals, is:

   fmod(src,dest) = (dest - (src * trunc(dest / src)))

   where the quotient is chopped toward zero. */

struct fp_ext *fp_fmod(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fmod\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RZ);
}

/* fp_frem: Implements the kernel of the FREM instruction.

   frem(src,dest) = (dest - (src * round(dest / src)))
 */

struct fp_ext *fp_frem(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "frem\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RN);
}

struct fp_ext *fp_fint(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fint\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPDATA->rnd);

	return dest;
}

struct fp_ext *fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fintrz\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPCR_ROUND_RZ);

	return dest;
}

struct fp_ext *fp_fscale(struct fp_ext *dest, struct fp_ext *src)
{
	int scale, oldround;

	dprint(PINSTR, "fscale\n");

	fp_dyadic_check(dest, src);

	/* Infinities */
	if (IS_INF(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(dest))
		return dest;

	/* zeroes */
	if (IS_ZERO(src) || IS_ZERO(dest))
		return dest;

	/* Source exponent out of range */
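	/* A biased source exponent of 0x400c or above means
	   |src| >= 2^13; such scale factors are simply treated as
	   overflow. */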
	if (src->exp >= 0x400c) {
		fp_set_ovrflw(dest);
		return dest;
	}

	/* src must be rounded with round to zero. */
	oldround = FPDATA->rnd;
	FPDATA->rnd = FPCR_ROUND_RZ;
	scale = fp_conv_ext2long(src);
	FPDATA->rnd = oldround;

	/* new exponent */
	scale += dest->exp;

	if (scale >= 0x7fff) {
		fp_set_ovrflw(dest);
	} else if (scale <= 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -scale);
	} else
		dest->exp = scale;

	return dest;
}
