// SPDX-License-Identifier: GPL-2.0
/*
 * Functions for incremental mean and variance.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Copyright © 2022 Daniel B. Hill
 *
 * Author: Daniel B. Hill <daniel@gluo.nz>
 *
 * Description:
 *
 * This includes some incremental algorithms for mean and variance calculation.
 *
 * Derived from the paper: https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf
 *
 * Create a struct and, if it's the weighted variant, set the weight field (weight = 2^k).
 *
 * Use mean_and_variance[_weighted]_update() on the struct to update its state.
 *
 * Use the mean_and_variance[_weighted]_get_* functions to calculate the mean
 * and variance; some computation is deferred to these functions for
 * performance reasons.
 *
 * See lib/math/mean_and_variance_test.c and the usage sketch below for
 * examples of usage.
 *
 * DO NOT access the mean and variance fields of the weighted variants directly.
 * DO NOT change the weight after calling update.
 */
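
/*
 * Usage sketch (illustrative only - mean_and_variance_update() is assumed to
 * be declared in mean_and_variance.h with the same shape as the weighted
 * update in this file, and get_next_sample() is a hypothetical sample
 * source):
 *
 *	struct mean_and_variance mv = {};
 *	struct mean_and_variance_weighted wv = { .weight = 2 };
 *	s64 v;
 *
 *	while (get_next_sample(&v)) {
 *		mean_and_variance_update(&mv, v);
 *		mean_and_variance_weighted_update(&wv, v);
 *	}
 *
 *	pr_info("mean %lli stddev %u\n",
 *		mean_and_variance_get_mean(mv),
 *		mean_and_variance_get_stddev(mv));
 */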

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/limits.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/module.h>

#include "mean_and_variance.h"

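/*
 * Divide a 128 bit dividend by a 64 bit divisor by schoolbook long division:
 * the high word is divided 32 bits at a time and the low word in one final
 * step, with each remainder carried (shifted up by 32) into the next
 * division so that each step can use div64_u64_rem().  The partial quotients
 * are shifted back into position and accumulated in a u128.
 */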
u128_u u128_div(u128_u n, u64 d)
{
	u128_u r;
	u64 rem;
	u64 hi = u128_hi(n);
	u64 lo = u128_lo(n);
	u64  h =  hi & ((u64) U32_MAX  << 32);
	u64  l = (hi &  (u64) U32_MAX) << 32;

	r =             u128_shl(u64_to_u128(div64_u64_rem(h,                d, &rem)), 64);
	r = u128_add(r, u128_shl(u64_to_u128(div64_u64_rem(l  + (rem << 32), d, &rem)), 32));
	r = u128_add(r,          u64_to_u128(div64_u64_rem(lo + (rem << 32), d, &rem)));
	return r;
}
EXPORT_SYMBOL_GPL(u128_div);

/**
 * mean_and_variance_get_mean() - get mean from @s
 */
s64 mean_and_variance_get_mean(struct mean_and_variance s)
{
	return s.n ? div64_u64(s.sum, s.n) : 0;
}
EXPORT_SYMBOL_GPL(mean_and_variance_get_mean);

/**
 * mean_and_variance_get_variance() - get variance from @s1
 *
 * see linked pdf equation 12: variance = E[X^2] - (E[X])^2.
 */
u64 mean_and_variance_get_variance(struct mean_and_variance s1)
{
	if (s1.n) {
		u128_u s2 = u128_div(s1.sum_squares, s1.n);
		u64  s3 = abs(mean_and_variance_get_mean(s1));

		return u128_lo(u128_sub(s2, u128_square(s3)));
	} else {
		return 0;
	}
}
EXPORT_SYMBOL_GPL(mean_and_variance_get_variance);
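
/*
 * Worked example of the identity above (illustrative only): for the samples
 * { 2, 4, 4, 4, 5, 5, 7, 9 } we have n = 8, sum = 40 and sum_squares = 232,
 * so the mean is 5, the variance is 232/8 - 5^2 = 4 and the standard
 * deviation is 2.
 */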

/**
 * mean_and_variance_get_stddev() - get standard deviation from @s
 */
u32 mean_and_variance_get_stddev(struct mean_and_variance s)
{
	return int_sqrt64(mean_and_variance_get_variance(s));
}
EXPORT_SYMBOL_GPL(mean_and_variance_get_stddev);

/**
 * mean_and_variance_weighted_update() - exponentially weighted variant of mean_and_variance_update()
 * @s: mean and variance state to update
 * @x: new sample
 *
 * see linked pdf: function derived from equations 140-143 where alpha = 2^w.
 * values are stored bitshifted for performance and added precision.
 */
void mean_and_variance_weighted_update(struct mean_and_variance_weighted *s, s64 x)
{
	// previous weighted variance.
	u8 w		= s->weight;
	u64 var_w0	= s->variance;
	// new value weighted.
	s64 x_w		= x << w;
	s64 diff_w	= x_w - s->mean;
	s64 diff	= fast_divpow2(diff_w, w);
	// new mean weighted.
	s64 u_w1	= s->mean + diff;

	if (!s->init) {
		s->mean = x_w;
		s->variance = 0;
	} else {
		s->mean = u_w1;
		s->variance = ((var_w0 << w) - var_w0 + ((diff_w * (x_w - u_w1)) >> w)) >> w;
	}
	s->init = true;
}
EXPORT_SYMBOL_GPL(mean_and_variance_weighted_update);
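
/*
 * Roughly, ignoring the fixed point scaling by 2^w and integer truncation,
 * the update above computes (a sketch, with x the new sample):
 *
 *	mean     <- mean + (x - mean) / 2^w
 *	variance <- (1 - 1/2^w) * (variance + (x - mean)^2 / 2^w)
 */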

/**
 * mean_and_variance_weighted_get_mean() - get mean from @s
 */
s64 mean_and_variance_weighted_get_mean(struct mean_and_variance_weighted s)
{
	return fast_divpow2(s.mean, s.weight);
}
EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_mean);

/**
 * mean_and_variance_weighted_get_variance() - get variance from @s
 */
u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s)
{
	// always positive, so we don't need fast_divpow2()
	return s.variance >> s.weight;
}
EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_variance);

/**
 * mean_and_variance_weighted_get_stddev() - get standard deviation from @s
 */
u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s)
{
	return int_sqrt64(mean_and_variance_weighted_get_variance(s));
}
EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_stddev);

MODULE_AUTHOR("Daniel B. Hill");
MODULE_LICENSE("GPL");