/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2018 Intel Corporation
 */
5*337ce206SJani Nikula
6*337ce206SJani Nikula #ifndef _I915_FIXED_H_
7*337ce206SJani Nikula #define _I915_FIXED_H_
8*337ce206SJani Nikula
9*337ce206SJani Nikula #include <linux/bug.h>
10*337ce206SJani Nikula #include <linux/kernel.h>
11*337ce206SJani Nikula #include <linux/math64.h>
12*337ce206SJani Nikula #include <linux/types.h>
13*337ce206SJani Nikula
14*337ce206SJani Nikula typedef struct {
15*337ce206SJani Nikula u32 val;
16*337ce206SJani Nikula } uint_fixed_16_16_t;
17*337ce206SJani Nikula
18*337ce206SJani Nikula #define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })
19*337ce206SJani Nikula
is_fixed16_zero(uint_fixed_16_16_t val)20*337ce206SJani Nikula static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
21*337ce206SJani Nikula {
22*337ce206SJani Nikula return val.val == 0;
23*337ce206SJani Nikula }
24*337ce206SJani Nikula
u32_to_fixed16(u32 val)25*337ce206SJani Nikula static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
26*337ce206SJani Nikula {
27*337ce206SJani Nikula uint_fixed_16_16_t fp = { .val = val << 16 };
28*337ce206SJani Nikula
29*337ce206SJani Nikula WARN_ON(val > U16_MAX);
30*337ce206SJani Nikula
31*337ce206SJani Nikula return fp;
32*337ce206SJani Nikula }
33*337ce206SJani Nikula
fixed16_to_u32_round_up(uint_fixed_16_16_t fp)34*337ce206SJani Nikula static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
35*337ce206SJani Nikula {
36*337ce206SJani Nikula return DIV_ROUND_UP(fp.val, 1 << 16);
37*337ce206SJani Nikula }
38*337ce206SJani Nikula
fixed16_to_u32(uint_fixed_16_16_t fp)39*337ce206SJani Nikula static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
40*337ce206SJani Nikula {
41*337ce206SJani Nikula return fp.val >> 16;
42*337ce206SJani Nikula }
43*337ce206SJani Nikula
/* Return the smaller of two 16.16 fixed point values. */
static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
					     uint_fixed_16_16_t min2)
{
	return (uint_fixed_16_16_t){ .val = min(min1.val, min2.val) };
}
51*337ce206SJani Nikula
/* Return the larger of two 16.16 fixed point values. */
static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
					     uint_fixed_16_16_t max2)
{
	return (uint_fixed_16_16_t){ .val = max(max1.val, max2.val) };
}
59*337ce206SJani Nikula
clamp_u64_to_fixed16(u64 val)60*337ce206SJani Nikula static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
61*337ce206SJani Nikula {
62*337ce206SJani Nikula uint_fixed_16_16_t fp = { .val = (u32)val };
63*337ce206SJani Nikula
64*337ce206SJani Nikula WARN_ON(val > U32_MAX);
65*337ce206SJani Nikula
66*337ce206SJani Nikula return fp;
67*337ce206SJani Nikula }
68*337ce206SJani Nikula
div_round_up_fixed16(uint_fixed_16_16_t val,uint_fixed_16_16_t d)69*337ce206SJani Nikula static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
70*337ce206SJani Nikula uint_fixed_16_16_t d)
71*337ce206SJani Nikula {
72*337ce206SJani Nikula return DIV_ROUND_UP(val.val, d.val);
73*337ce206SJani Nikula }
74*337ce206SJani Nikula
/*
 * Multiply an integer by a 16.16 fixed point value, rounding the result
 * up to an integer. Warns when the result does not fit in 32 bits.
 */
static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
	u64 prod = mul_u32_u32(val, mul.val);
	u64 res = DIV_ROUND_UP_ULL(prod, 1 << 16);

	WARN_ON(res > U32_MAX);

	return (u32)res;
}
85*337ce206SJani Nikula
/*
 * Multiply two 16.16 fixed point values. The intermediate 32.32 product
 * is shifted back down to 16.16 before being reduced to 32 bits.
 */
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
					     uint_fixed_16_16_t mul)
{
	u64 prod = mul_u32_u32(val.val, mul.val);

	return clamp_u64_to_fixed16(prod >> 16);
}
96*337ce206SJani Nikula
div_fixed16(u32 val,u32 d)97*337ce206SJani Nikula static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
98*337ce206SJani Nikula {
99*337ce206SJani Nikula u64 tmp;
100*337ce206SJani Nikula
101*337ce206SJani Nikula tmp = (u64)val << 16;
102*337ce206SJani Nikula tmp = DIV_ROUND_UP_ULL(tmp, d);
103*337ce206SJani Nikula
104*337ce206SJani Nikula return clamp_u64_to_fixed16(tmp);
105*337ce206SJani Nikula }
106*337ce206SJani Nikula
/*
 * Divide an integer by a 16.16 fixed point value, rounding up, yielding
 * an integer result. Warns when the result does not fit in 32 bits.
 */
static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
{
	u64 res = DIV_ROUND_UP_ULL((u64)val << 16, d.val);

	WARN_ON(res > U32_MAX);

	return (u32)res;
}
117*337ce206SJani Nikula
/*
 * Multiply an integer by a 16.16 fixed point value, producing a 16.16
 * fixed point result.
 */
static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
	return clamp_u64_to_fixed16(mul_u32_u32(val, mul.val));
}
126*337ce206SJani Nikula
/* Add two 16.16 fixed point values; the sum is widened to avoid overflow. */
static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
					     uint_fixed_16_16_t add2)
{
	u64 sum = (u64)add1.val + add2.val;

	return clamp_u64_to_fixed16(sum);
}
136*337ce206SJani Nikula
/* Add an integer to a 16.16 fixed point value. */
static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
						 u32 add2)
{
	u64 sum = (u64)add1.val + u32_to_fixed16(add2).val;

	return clamp_u64_to_fixed16(sum);
}
147*337ce206SJani Nikula
148*337ce206SJani Nikula #endif /* _I915_FIXED_H_ */
149