// SPDX-License-Identifier: GPL-2.0-or-later
/*

   fp_arith.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines.

   Somewhat based on the AlphaLinux floating point emulator, by David
   Mosberger-Tang.

 */

#include "fp_emu.h"
#include "multi_arith.h"
#include "fp_arith.h"

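/* In m68k extended precision an exponent of 0x7fff marks the special
   values: a zero mantissa encodes an infinity, a non-zero mantissa a
   NaN (all mantissa bits set here, giving a quiet NaN). */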
const struct fp_ext fp_QNaN =
{
	.exp = 0x7fff,
	.mant = { .m64 = ~0 }
};

const struct fp_ext fp_Inf =
{
	.exp = 0x7fff,
};

/* let's start with the easy ones */

struct fp_ext *
fp_fabs(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fabs\n");

	fp_monadic_check(dest, src);

	dest->sign = 0;

	return dest;
}

struct fp_ext *
fp_fneg(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fneg\n");

	fp_monadic_check(dest, src);

	dest->sign = !dest->sign;

	return dest;
}

/* Now, the slightly harder ones */

/* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
   FDSUB, and FCMP instructions. */

struct fp_ext *
fp_fadd(struct fp_ext *dest, struct fp_ext *src)
{
	int diff;

	dprint(PINSTR, "fadd\n");

	fp_dyadic_check(dest, src);

	if (IS_INF(dest)) {
		/* infinity - infinity == NaN */
		if (IS_INF(src) && (src->sign != dest->sign))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		fp_copy_ext(dest, src);
		return dest;
	}

	if (IS_ZERO(dest)) {
		if (IS_ZERO(src)) {
			if (src->sign != dest->sign) {
				if (FPDATA->rnd == FPCR_ROUND_RM)
					dest->sign = 1;
				else
					dest->sign = 0;
			}
		} else
			fp_copy_ext(dest, src);
		return dest;
	}

	dest->lowmant = src->lowmant = 0;

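	/* Align the exponents: the operand with the smaller exponent is
	   shifted right by the difference so that both mantissas are
	   scaled to the larger exponent; the bits shifted out are not
	   simply dropped but kept (via lowmant) for rounding. */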
	if ((diff = dest->exp - src->exp) > 0)
		fp_denormalize(src, diff);
	else if ((diff = -diff) > 0)
		fp_denormalize(dest, diff);

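	/* Same signs: the magnitudes add, and a carry out of the 64-bit
	   mantissa addition is folded back in by fp_addcarry (whose return
	   value signals exponent overflow).  Different signs: subtract the
	   smaller magnitude from the larger one; the result takes the sign
	   of the larger operand. */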
	if (dest->sign == src->sign) {
		if (fp_addmant(dest, src))
			if (!fp_addcarry(dest))
				return dest;
	} else {
		if (dest->mant.m64 < src->mant.m64) {
			fp_submant(dest, src, dest);
			dest->sign = !dest->sign;
		} else
			fp_submant(dest, dest, src);
	}

	return dest;
}

/* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
   instructions.

   Remember that the arguments are in assembler-syntax order! */
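/* (For reference: the 68881 FSUB computes destination minus source,
   FPn = FPn - FPm, which is exactly what negating src and adding
   achieves here.) */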

struct fp_ext *
fp_fsub(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fsub ");

	src->sign = !src->sign;
	return fp_fadd(dest, src);
}


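/* fp_fcmp: FCMP must not modify the destination register, so the
   subtraction is carried out on a scratch copy (FPDATA->temp[1]);
   only the returned difference, from which the condition codes are
   derived, is affected. */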
struct fp_ext *
fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fcmp ");

	FPDATA->temp[1] = *dest;
	src->sign = !src->sign;
	return fp_fadd(&FPDATA->temp[1], src);
}

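/* fp_ftst: FTST only examines the source operand to set the condition
   codes, so dest is deliberately unused here. */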
struct fp_ext *
fp_ftst(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "ftst\n");

	(void)dest;

	return src;
}

struct fp_ext *
fp_fmul(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

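	/* Both exponents carry the extended-precision bias 0x3fff, so
	   their sum counts it twice.  Subtracting only 0x3ffe leaves the
	   result biased once plus an extra 1, which is correct when the
	   mantissa product (two values in [1,2)) lands in [2,4); if it
	   lands in [1,2) instead, the normalization below shifts left by
	   one and takes the 1 back with exp--.  E.g. 1.5 * 1.5 = 2.25:
	   0x3fff + 0x3fff - 0x3ffe = 0x4000, the biased exponent of 2.25. */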
	exp = dest->exp + src->exp - 0x3ffe;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes the
	   shift of the result below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do a 64-bit multiply with expansion */
	fp_multiplymant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if ((long)temp.m32[0] > 0) {
		exp--;
		fp_putmant128(dest, &temp, 1);
	} else
		fp_putmant128(dest, &temp, 0);

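	/* 0x7fff is reserved for infinities and NaNs, so a biased result
	   exponent at or above it overflows; a negative biased exponent
	   means the result is below the smallest normal number and has to
	   be delivered as a denormal (underflow). */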
	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and
   FSGLDIV instructions.

   Note that the order of the operands is counter-intuitive: instead
   of src / dest, the result is actually dest / src. */

struct fp_ext *
fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fdiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

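	/* The difference of two biased exponents cancels the bias, so it
	   is added back in.  The quotient of two normalized mantissas
	   lies in (1/2, 2); when it comes out below 1, the normalization
	   after the divide decrements exp by one to compensate. */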
	exp = dest->exp - src->exp + 0x3fff;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes lots
	   of things below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do the 64-bit divide */
	fp_dividemant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if (!temp.m32[0]) {
		exp--;
		fp_putmant128(dest, &temp, 32);
	} else
		fp_putmant128(dest, &temp, 31);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

struct fp_ext *
fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;

	dprint(PINSTR, "fsglmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

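	/* FSGLMUL works to single precision: only the upper 24 bits of
	   each mantissa take part (hence the 0xffffff00 masks), and the
	   64-bit product of the two 32-bit high words can be written
	   straight into mant.m32[0]/m32[1] without a 128-bit temporary. */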
	/* do a 32-bit multiply */
	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
		 dest->mant.m32[0] & 0xffffff00,
		 src->mant.m32[0] & 0xffffff00);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

struct fp_ext *
fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;
	unsigned long quot, rem;

	dprint(PINSTR, "fsgldiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

	dest->mant.m32[0] &= 0xffffff00;
	src->mant.m32[0] &= 0xffffff00;

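	/* FSGLDIV also works to single precision, so only the upper 24
	   bits of each mantissa are kept.  fp_div64 yields a 32-bit
	   quotient, which only fits when the high word of the dividend is
	   smaller than the divisor; when dest's mantissa is not smaller
	   than src's, one divisor is therefore subtracted out first and
	   the quotient's leading 1 is put back explicitly (0x80000000)
	   below. */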
	/* do the 32-bit divide */
	if (dest->mant.m32[0] >= src->mant.m32[0]) {
		fp_sub64(dest->mant, src->mant);
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
	} else {
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = quot;
		dest->mant.m32[1] = rem;		/* only for rounding */
		exp--;
	}

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/* fp_roundint: Internal rounding function for use by several of these
   emulated instructions.

   This one rounds off the fractional part using the rounding mode
   specified. */

static void fp_roundint(struct fp_ext *dest, int mode)
{
	union fp_mant64 oldmant;
	unsigned long mask;

	if (!fp_normalize_ext(dest))
		return;

	/* infinities and zeroes */
	if (IS_INF(dest) || IS_ZERO(dest))
		return;

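	/* The mantissa is 1.xxx... scaled by 2^(exp - 0x3fff), so the
	   number of integer bits is exp - 0x3fff + 1.  Exponents up to
	   0x3ffe mean |x| < 1 (the whole mantissa is fraction), 0x3fff to
	   0x401e put the binary point inside the upper 32-bit word,
	   0x401f to 0x403e put it inside the lower word, and anything
	   larger means the value is already an integer. */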
	/* first truncate the lower bits */
	oldmant = dest->mant;
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->mant.m64 = 0;
		break;
	case 0x3fff ... 0x401e:
		dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
		dest->mant.m32[1] = 0;
		if (oldmant.m64 == dest->mant.m64)
			return;
		break;
	case 0x401f ... 0x403e:
		dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
		if (oldmant.m32[1] == dest->mant.m32[1])
			return;
		break;
	default:
		return;
	}
	fp_set_sr(FPSR_EXC_INEX2);

	/* We might want to normalize upwards here... however, since
	   we know that this is only called on the output of fp_fdiv,
	   or with the input to fp_fint or fp_fintrz, and the inputs
	   to all these functions are either normal or denormalized
	   (no subnormals allowed!), there's really no need.

	   In the case of fp_fdiv, observe that (0x8000 << 16) / 0xffff =
	   0x8000 plus a remainder, and the same holds for 128-bit / 64-bit. (i.e. the
	   smallest possible normal dividend and the largest possible normal
	   divisor will still produce a normal quotient, therefore, (normal
	   << 64) / normal is normal in all cases) */

	switch (mode) {
	case FPCR_ROUND_RN:
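		/* Round to nearest, ties to even: if the first discarded
		   (guard) bit is clear, the truncated result stands.  If it
		   is set and either the result's lowest kept bit is 1 or any
		   bit below the guard bit is 1, round up; an exact halfway
		   case with an even result is left alone. */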
		switch (dest->exp) {
		case 0 ... 0x3ffd:
			return;
		case 0x3ffe:
			/* As noted above, the input is always normal, so the
			   guard bit (bit 63) is always set.  therefore, the
			   only case in which we will NOT round to 1.0 is when
			   the input is exactly 0.5. */
			if (oldmant.m64 == (1ULL << 63))
				return;
			break;
		case 0x3fff ... 0x401d:
			mask = 1 << (0x401d - dest->exp);
			if (!(oldmant.m32[0] & mask))
				return;
			if (oldmant.m32[0] & (mask << 1))
				break;
			if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
					!oldmant.m32[1])
				return;
			break;
		case 0x401e:
			if (oldmant.m32[1] & 0x80000000)
				return;
			if (oldmant.m32[0] & 1)
				break;
			if (!(oldmant.m32[1] << 1))
				return;
			break;
		case 0x401f ... 0x403d:
			mask = 1 << (0x403d - dest->exp);
			if (!(oldmant.m32[1] & mask))
				return;
			if (oldmant.m32[1] & (mask << 1))
				break;
			if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
				return;
			break;
		default:
			return;
		}
		break;
	case FPCR_ROUND_RZ:
		return;
	default:
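		/* Only RM and RP are left.  (mode - FPCR_ROUND_RM) is 0 for
		   round-to-minus-infinity and 1 for round-to-plus-infinity,
		   so XORing it with the sign rounds the magnitude up exactly
		   when the rounding direction points away from zero for this
		   sign, and truncates otherwise. */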
		if (dest->sign ^ (mode - FPCR_ROUND_RM))
			break;
		return;
	}

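	/* Round the magnitude up by adding one unit in the last kept
	   place.  The "if (x += mask)" idiom detects the word wrapping to
	   zero: in that case the carry propagated all the way out, so the
	   mantissa is reset to 1.0 (0x80000000...) and the exponent is
	   bumped instead. */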
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->exp = 0x3fff;
		dest->mant.m64 = 1ULL << 63;
		break;
	case 0x3fff ... 0x401e:
		mask = 1 << (0x401e - dest->exp);
		if (dest->mant.m32[0] += mask)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	case 0x401f ... 0x403e:
		mask = 1 << (0x403e - dest->exp);
		if (dest->mant.m32[1] += mask)
			break;
		if (dest->mant.m32[0] += 1)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	}
}

/* modrem_kernel: Implementation of the FREM and FMOD instructions
   (which are exactly the same, except for the rounding used on the
   intermediate value) */

static struct fp_ext *
modrem_kernel(struct fp_ext *dest, struct fp_ext *src, int mode)
{
	struct fp_ext tmp;

	fp_dyadic_check(dest, src);

	/* Infinities and zeros */
	if (IS_INF(dest) || IS_ZERO(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_ZERO(dest) || IS_INF(src))
		return dest;

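	/* Compute q = dest / src rounded according to 'mode' (toward zero
	   for FMOD, to nearest for FREM), then return dest - q * src. */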
	/* FIXME: there is almost certainly a smarter way to do this */
	fp_copy_ext(&tmp, dest);
	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */
	fp_roundint(&tmp, mode);
	fp_fmul(&tmp, src);
	fp_fsub(dest, &tmp);

	/* set the quotient byte */
	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
	return dest;
}

/* fp_fmod: Implements the kernel of the FMOD instruction.

   Again, the argument order is backwards.  The result, as defined in
   the Motorola manuals, is:

   fmod(src,dest) = (dest - (src * trunc(dest / src))) */

struct fp_ext *
fp_fmod(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fmod\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RZ);
}

/* fp_frem: Implements the kernel of the FREM instruction.

   frem(src,dest) = (dest - (src * round(dest / src)))
 */

struct fp_ext *
fp_frem(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "frem\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RN);
}

struct fp_ext *
fp_fint(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fint\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPDATA->rnd);

	return dest;
}

struct fp_ext *
fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fintrz\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPCR_ROUND_RZ);

	return dest;
}

struct fp_ext *
fp_fscale(struct fp_ext *dest, struct fp_ext *src)
{
	int scale, oldround;

	dprint(PINSTR, "fscale\n");

	fp_dyadic_check(dest, src);

	/* Infinities */
	if (IS_INF(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(dest))
		return dest;

	/* zeroes */
	if (IS_ZERO(src) || IS_ZERO(dest))
		return dest;

	/* Source exponent out of range */
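	/* A biased exponent of 0x400c or more corresponds to |src| >= 2^13,
	   i.e. a scale factor of at least 8192; scale factors that large
	   are not handled here and are simply reported as overflow. */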
	if (src->exp >= 0x400c) {
		fp_set_ovrflw(dest);
		return dest;
	}

	/* src must be rounded with round to zero. */
	oldround = FPDATA->rnd;
	FPDATA->rnd = FPCR_ROUND_RZ;
	scale = fp_conv_ext2long(src);
	FPDATA->rnd = oldround;

	/* new exponent */
	scale += dest->exp;

	if (scale >= 0x7fff) {
		fp_set_ovrflw(dest);
	} else if (scale <= 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -scale);
	} else
		dest->exp = scale;

	return dest;
}