/* xref: /freebsd/sys/contrib/dev/athk/dfs_pri_detector.c (revision ebacd8013fe5f7fdf9f6a5b286f6680dd2891036) */
/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>

#include "ath.h"
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"

24*ebacd801SBjoern A. Zeeb struct ath_dfs_pool_stats global_dfs_pool_stats = {};
25*ebacd801SBjoern A. Zeeb 
26*ebacd801SBjoern A. Zeeb #define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
27*ebacd801SBjoern A. Zeeb #define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
28*ebacd801SBjoern A. Zeeb #define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
29*ebacd801SBjoern A. Zeeb 	(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
30*ebacd801SBjoern A. Zeeb 	MIN + PRI_TOLERANCE : RUNTIME)
31*ebacd801SBjoern A. Zeeb 
32*ebacd801SBjoern A. Zeeb /*
33*ebacd801SBjoern A. Zeeb  * struct pulse_elem - elements in pulse queue
34*ebacd801SBjoern A. Zeeb  */
35*ebacd801SBjoern A. Zeeb struct pulse_elem {
36*ebacd801SBjoern A. Zeeb 	struct list_head head;
37*ebacd801SBjoern A. Zeeb 	u64 ts;
38*ebacd801SBjoern A. Zeeb };
39*ebacd801SBjoern A. Zeeb 
40*ebacd801SBjoern A. Zeeb /*
41*ebacd801SBjoern A. Zeeb  * pde_get_multiple() - get number of multiples considering a given tolerance
42*ebacd801SBjoern A. Zeeb  * Return value: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
43*ebacd801SBjoern A. Zeeb  */
pde_get_multiple(u32 val,u32 fraction,u32 tolerance)44*ebacd801SBjoern A. Zeeb static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
45*ebacd801SBjoern A. Zeeb {
46*ebacd801SBjoern A. Zeeb 	u32 remainder;
47*ebacd801SBjoern A. Zeeb 	u32 factor;
48*ebacd801SBjoern A. Zeeb 	u32 delta;
49*ebacd801SBjoern A. Zeeb 
50*ebacd801SBjoern A. Zeeb 	if (fraction == 0)
51*ebacd801SBjoern A. Zeeb 		return 0;
52*ebacd801SBjoern A. Zeeb 
53*ebacd801SBjoern A. Zeeb 	delta = (val < fraction) ? (fraction - val) : (val - fraction);
54*ebacd801SBjoern A. Zeeb 
55*ebacd801SBjoern A. Zeeb 	if (delta <= tolerance)
56*ebacd801SBjoern A. Zeeb 		/* val and fraction are within tolerance */
57*ebacd801SBjoern A. Zeeb 		return 1;
58*ebacd801SBjoern A. Zeeb 
59*ebacd801SBjoern A. Zeeb 	factor = val / fraction;
60*ebacd801SBjoern A. Zeeb 	remainder = val % fraction;
61*ebacd801SBjoern A. Zeeb 	if (remainder > tolerance) {
62*ebacd801SBjoern A. Zeeb 		/* no exact match */
63*ebacd801SBjoern A. Zeeb 		if ((fraction - remainder) <= tolerance)
64*ebacd801SBjoern A. Zeeb 			/* remainder is within tolerance */
65*ebacd801SBjoern A. Zeeb 			factor++;
66*ebacd801SBjoern A. Zeeb 		else
67*ebacd801SBjoern A. Zeeb 			factor = 0;
68*ebacd801SBjoern A. Zeeb 	}
69*ebacd801SBjoern A. Zeeb 	return factor;
70*ebacd801SBjoern A. Zeeb }
71*ebacd801SBjoern A. Zeeb 
72*ebacd801SBjoern A. Zeeb /*
73*ebacd801SBjoern A. Zeeb  * DOC: Singleton Pulse and Sequence Pools
74*ebacd801SBjoern A. Zeeb  *
75*ebacd801SBjoern A. Zeeb  * Instances of pri_sequence and pulse_elem are kept in singleton pools to
76*ebacd801SBjoern A. Zeeb  * reduce the number of dynamic allocations. They are shared between all
77*ebacd801SBjoern A. Zeeb  * instances and grow up to the peak number of simultaneously used objects.
78*ebacd801SBjoern A. Zeeb  *
79*ebacd801SBjoern A. Zeeb  * Memory is freed after all references to the pools are released.
80*ebacd801SBjoern A. Zeeb  */
81*ebacd801SBjoern A. Zeeb static u32 singleton_pool_references;
82*ebacd801SBjoern A. Zeeb static LIST_HEAD(pulse_pool);
83*ebacd801SBjoern A. Zeeb static LIST_HEAD(pseq_pool);
84*ebacd801SBjoern A. Zeeb static DEFINE_SPINLOCK(pool_lock);
85*ebacd801SBjoern A. Zeeb 
pool_register_ref(void)86*ebacd801SBjoern A. Zeeb static void pool_register_ref(void)
87*ebacd801SBjoern A. Zeeb {
88*ebacd801SBjoern A. Zeeb 	spin_lock_bh(&pool_lock);
89*ebacd801SBjoern A. Zeeb 	singleton_pool_references++;
90*ebacd801SBjoern A. Zeeb 	DFS_POOL_STAT_INC(pool_reference);
91*ebacd801SBjoern A. Zeeb 	spin_unlock_bh(&pool_lock);
92*ebacd801SBjoern A. Zeeb }
93*ebacd801SBjoern A. Zeeb 
pool_deregister_ref(void)94*ebacd801SBjoern A. Zeeb static void pool_deregister_ref(void)
95*ebacd801SBjoern A. Zeeb {
96*ebacd801SBjoern A. Zeeb 	spin_lock_bh(&pool_lock);
97*ebacd801SBjoern A. Zeeb 	singleton_pool_references--;
98*ebacd801SBjoern A. Zeeb 	DFS_POOL_STAT_DEC(pool_reference);
99*ebacd801SBjoern A. Zeeb 	if (singleton_pool_references == 0) {
100*ebacd801SBjoern A. Zeeb 		/* free singleton pools with no references left */
101*ebacd801SBjoern A. Zeeb 		struct pri_sequence *ps, *ps0;
102*ebacd801SBjoern A. Zeeb 		struct pulse_elem *p, *p0;
103*ebacd801SBjoern A. Zeeb 
104*ebacd801SBjoern A. Zeeb 		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
105*ebacd801SBjoern A. Zeeb 			list_del(&p->head);
106*ebacd801SBjoern A. Zeeb 			DFS_POOL_STAT_DEC(pulse_allocated);
107*ebacd801SBjoern A. Zeeb 			kfree(p);
108*ebacd801SBjoern A. Zeeb 		}
109*ebacd801SBjoern A. Zeeb 		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
110*ebacd801SBjoern A. Zeeb 			list_del(&ps->head);
111*ebacd801SBjoern A. Zeeb 			DFS_POOL_STAT_DEC(pseq_allocated);
112*ebacd801SBjoern A. Zeeb 			kfree(ps);
113*ebacd801SBjoern A. Zeeb 		}
114*ebacd801SBjoern A. Zeeb 	}
115*ebacd801SBjoern A. Zeeb 	spin_unlock_bh(&pool_lock);
116*ebacd801SBjoern A. Zeeb }
117*ebacd801SBjoern A. Zeeb 
pool_put_pulse_elem(struct pulse_elem * pe)118*ebacd801SBjoern A. Zeeb static void pool_put_pulse_elem(struct pulse_elem *pe)
119*ebacd801SBjoern A. Zeeb {
120*ebacd801SBjoern A. Zeeb 	spin_lock_bh(&pool_lock);
121*ebacd801SBjoern A. Zeeb 	list_add(&pe->head, &pulse_pool);
122*ebacd801SBjoern A. Zeeb 	DFS_POOL_STAT_DEC(pulse_used);
123*ebacd801SBjoern A. Zeeb 	spin_unlock_bh(&pool_lock);
124*ebacd801SBjoern A. Zeeb }
125*ebacd801SBjoern A. Zeeb 
pool_put_pseq_elem(struct pri_sequence * pse)126*ebacd801SBjoern A. Zeeb static void pool_put_pseq_elem(struct pri_sequence *pse)
127*ebacd801SBjoern A. Zeeb {
128*ebacd801SBjoern A. Zeeb 	spin_lock_bh(&pool_lock);
129*ebacd801SBjoern A. Zeeb 	list_add(&pse->head, &pseq_pool);
130*ebacd801SBjoern A. Zeeb 	DFS_POOL_STAT_DEC(pseq_used);
131*ebacd801SBjoern A. Zeeb 	spin_unlock_bh(&pool_lock);
132*ebacd801SBjoern A. Zeeb }
133*ebacd801SBjoern A. Zeeb 
pool_get_pseq_elem(void)134*ebacd801SBjoern A. Zeeb static struct pri_sequence *pool_get_pseq_elem(void)
135*ebacd801SBjoern A. Zeeb {
136*ebacd801SBjoern A. Zeeb 	struct pri_sequence *pse = NULL;
137*ebacd801SBjoern A. Zeeb 	spin_lock_bh(&pool_lock);
138*ebacd801SBjoern A. Zeeb 	if (!list_empty(&pseq_pool)) {
139*ebacd801SBjoern A. Zeeb 		pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
140*ebacd801SBjoern A. Zeeb 		list_del(&pse->head);
141*ebacd801SBjoern A. Zeeb 		DFS_POOL_STAT_INC(pseq_used);
142*ebacd801SBjoern A. Zeeb 	}
143*ebacd801SBjoern A. Zeeb 	spin_unlock_bh(&pool_lock);
144*ebacd801SBjoern A. Zeeb 	return pse;
145*ebacd801SBjoern A. Zeeb }
146*ebacd801SBjoern A. Zeeb 
pool_get_pulse_elem(void)147*ebacd801SBjoern A. Zeeb static struct pulse_elem *pool_get_pulse_elem(void)
148*ebacd801SBjoern A. Zeeb {
149*ebacd801SBjoern A. Zeeb 	struct pulse_elem *pe = NULL;
150*ebacd801SBjoern A. Zeeb 	spin_lock_bh(&pool_lock);
151*ebacd801SBjoern A. Zeeb 	if (!list_empty(&pulse_pool)) {
152*ebacd801SBjoern A. Zeeb 		pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
153*ebacd801SBjoern A. Zeeb 		list_del(&pe->head);
154*ebacd801SBjoern A. Zeeb 		DFS_POOL_STAT_INC(pulse_used);
155*ebacd801SBjoern A. Zeeb 	}
156*ebacd801SBjoern A. Zeeb 	spin_unlock_bh(&pool_lock);
157*ebacd801SBjoern A. Zeeb 	return pe;
158*ebacd801SBjoern A. Zeeb }
159*ebacd801SBjoern A. Zeeb 
pulse_queue_get_tail(struct pri_detector * pde)160*ebacd801SBjoern A. Zeeb static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
161*ebacd801SBjoern A. Zeeb {
162*ebacd801SBjoern A. Zeeb 	struct list_head *l = &pde->pulses;
163*ebacd801SBjoern A. Zeeb 	if (list_empty(l))
164*ebacd801SBjoern A. Zeeb 		return NULL;
165*ebacd801SBjoern A. Zeeb 	return list_entry(l->prev, struct pulse_elem, head);
166*ebacd801SBjoern A. Zeeb }
167*ebacd801SBjoern A. Zeeb 
pulse_queue_dequeue(struct pri_detector * pde)168*ebacd801SBjoern A. Zeeb static bool pulse_queue_dequeue(struct pri_detector *pde)
169*ebacd801SBjoern A. Zeeb {
170*ebacd801SBjoern A. Zeeb 	struct pulse_elem *p = pulse_queue_get_tail(pde);
171*ebacd801SBjoern A. Zeeb 	if (p != NULL) {
172*ebacd801SBjoern A. Zeeb 		list_del_init(&p->head);
173*ebacd801SBjoern A. Zeeb 		pde->count--;
174*ebacd801SBjoern A. Zeeb 		/* give it back to pool */
175*ebacd801SBjoern A. Zeeb 		pool_put_pulse_elem(p);
176*ebacd801SBjoern A. Zeeb 	}
177*ebacd801SBjoern A. Zeeb 	return (pde->count > 0);
178*ebacd801SBjoern A. Zeeb }
179*ebacd801SBjoern A. Zeeb 
180*ebacd801SBjoern A. Zeeb /* remove pulses older than window */
pulse_queue_check_window(struct pri_detector * pde)181*ebacd801SBjoern A. Zeeb static void pulse_queue_check_window(struct pri_detector *pde)
182*ebacd801SBjoern A. Zeeb {
183*ebacd801SBjoern A. Zeeb 	u64 min_valid_ts;
184*ebacd801SBjoern A. Zeeb 	struct pulse_elem *p;
185*ebacd801SBjoern A. Zeeb 
186*ebacd801SBjoern A. Zeeb 	/* there is no delta time with less than 2 pulses */
187*ebacd801SBjoern A. Zeeb 	if (pde->count < 2)
188*ebacd801SBjoern A. Zeeb 		return;
189*ebacd801SBjoern A. Zeeb 
190*ebacd801SBjoern A. Zeeb 	if (pde->last_ts <= pde->window_size)
191*ebacd801SBjoern A. Zeeb 		return;
192*ebacd801SBjoern A. Zeeb 
193*ebacd801SBjoern A. Zeeb 	min_valid_ts = pde->last_ts - pde->window_size;
194*ebacd801SBjoern A. Zeeb 	while ((p = pulse_queue_get_tail(pde)) != NULL) {
195*ebacd801SBjoern A. Zeeb 		if (p->ts >= min_valid_ts)
196*ebacd801SBjoern A. Zeeb 			return;
197*ebacd801SBjoern A. Zeeb 		pulse_queue_dequeue(pde);
198*ebacd801SBjoern A. Zeeb 	}
199*ebacd801SBjoern A. Zeeb }
200*ebacd801SBjoern A. Zeeb 
pulse_queue_enqueue(struct pri_detector * pde,u64 ts)201*ebacd801SBjoern A. Zeeb static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
202*ebacd801SBjoern A. Zeeb {
203*ebacd801SBjoern A. Zeeb 	struct pulse_elem *p = pool_get_pulse_elem();
204*ebacd801SBjoern A. Zeeb 	if (p == NULL) {
205*ebacd801SBjoern A. Zeeb 		p = kmalloc(sizeof(*p), GFP_ATOMIC);
206*ebacd801SBjoern A. Zeeb 		if (p == NULL) {
207*ebacd801SBjoern A. Zeeb 			DFS_POOL_STAT_INC(pulse_alloc_error);
208*ebacd801SBjoern A. Zeeb 			return false;
209*ebacd801SBjoern A. Zeeb 		}
210*ebacd801SBjoern A. Zeeb 		DFS_POOL_STAT_INC(pulse_allocated);
211*ebacd801SBjoern A. Zeeb 		DFS_POOL_STAT_INC(pulse_used);
212*ebacd801SBjoern A. Zeeb 	}
213*ebacd801SBjoern A. Zeeb 	INIT_LIST_HEAD(&p->head);
214*ebacd801SBjoern A. Zeeb 	p->ts = ts;
215*ebacd801SBjoern A. Zeeb 	list_add(&p->head, &pde->pulses);
216*ebacd801SBjoern A. Zeeb 	pde->count++;
217*ebacd801SBjoern A. Zeeb 	pde->last_ts = ts;
218*ebacd801SBjoern A. Zeeb 	pulse_queue_check_window(pde);
219*ebacd801SBjoern A. Zeeb 	if (pde->count >= pde->max_count)
220*ebacd801SBjoern A. Zeeb 		pulse_queue_dequeue(pde);
221*ebacd801SBjoern A. Zeeb 	return true;
222*ebacd801SBjoern A. Zeeb }
223*ebacd801SBjoern A. Zeeb 
pseq_handler_create_sequences(struct pri_detector * pde,u64 ts,u32 min_count)224*ebacd801SBjoern A. Zeeb static bool pseq_handler_create_sequences(struct pri_detector *pde,
225*ebacd801SBjoern A. Zeeb 					  u64 ts, u32 min_count)
226*ebacd801SBjoern A. Zeeb {
227*ebacd801SBjoern A. Zeeb 	struct pulse_elem *p;
228*ebacd801SBjoern A. Zeeb 	list_for_each_entry(p, &pde->pulses, head) {
229*ebacd801SBjoern A. Zeeb 		struct pri_sequence ps, *new_ps;
230*ebacd801SBjoern A. Zeeb 		struct pulse_elem *p2;
231*ebacd801SBjoern A. Zeeb 		u32 tmp_false_count;
232*ebacd801SBjoern A. Zeeb 		u64 min_valid_ts;
233*ebacd801SBjoern A. Zeeb 		u32 delta_ts = ts - p->ts;
234*ebacd801SBjoern A. Zeeb 
235*ebacd801SBjoern A. Zeeb 		if (delta_ts < pde->rs->pri_min)
236*ebacd801SBjoern A. Zeeb 			/* ignore too small pri */
237*ebacd801SBjoern A. Zeeb 			continue;
238*ebacd801SBjoern A. Zeeb 
239*ebacd801SBjoern A. Zeeb 		if (delta_ts > pde->rs->pri_max)
240*ebacd801SBjoern A. Zeeb 			/* stop on too large pri (sorted list) */
241*ebacd801SBjoern A. Zeeb 			break;
242*ebacd801SBjoern A. Zeeb 
243*ebacd801SBjoern A. Zeeb 		/* build a new sequence with new potential pri */
244*ebacd801SBjoern A. Zeeb 		ps.count = 2;
245*ebacd801SBjoern A. Zeeb 		ps.count_falses = 0;
246*ebacd801SBjoern A. Zeeb 		ps.first_ts = p->ts;
247*ebacd801SBjoern A. Zeeb 		ps.last_ts = ts;
248*ebacd801SBjoern A. Zeeb 		ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
249*ebacd801SBjoern A. Zeeb 			pde->rs->pri_max, ts - p->ts);
250*ebacd801SBjoern A. Zeeb 		ps.dur = ps.pri * (pde->rs->ppb - 1)
251*ebacd801SBjoern A. Zeeb 				+ 2 * pde->rs->max_pri_tolerance;
252*ebacd801SBjoern A. Zeeb 
253*ebacd801SBjoern A. Zeeb 		p2 = p;
254*ebacd801SBjoern A. Zeeb 		tmp_false_count = 0;
255*ebacd801SBjoern A. Zeeb 		min_valid_ts = ts - ps.dur;
256*ebacd801SBjoern A. Zeeb 		/* check which past pulses are candidates for new sequence */
257*ebacd801SBjoern A. Zeeb 		list_for_each_entry_continue(p2, &pde->pulses, head) {
258*ebacd801SBjoern A. Zeeb 			u32 factor;
259*ebacd801SBjoern A. Zeeb 			if (p2->ts < min_valid_ts)
260*ebacd801SBjoern A. Zeeb 				/* stop on crossing window border */
261*ebacd801SBjoern A. Zeeb 				break;
262*ebacd801SBjoern A. Zeeb 			/* check if pulse match (multi)PRI */
263*ebacd801SBjoern A. Zeeb 			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
264*ebacd801SBjoern A. Zeeb 						  pde->rs->max_pri_tolerance);
265*ebacd801SBjoern A. Zeeb 			if (factor > 0) {
266*ebacd801SBjoern A. Zeeb 				ps.count++;
267*ebacd801SBjoern A. Zeeb 				ps.first_ts = p2->ts;
268*ebacd801SBjoern A. Zeeb 				/*
269*ebacd801SBjoern A. Zeeb 				 * on match, add the intermediate falses
270*ebacd801SBjoern A. Zeeb 				 * and reset counter
271*ebacd801SBjoern A. Zeeb 				 */
272*ebacd801SBjoern A. Zeeb 				ps.count_falses += tmp_false_count;
273*ebacd801SBjoern A. Zeeb 				tmp_false_count = 0;
274*ebacd801SBjoern A. Zeeb 			} else {
275*ebacd801SBjoern A. Zeeb 				/* this is a potential false one */
276*ebacd801SBjoern A. Zeeb 				tmp_false_count++;
277*ebacd801SBjoern A. Zeeb 			}
278*ebacd801SBjoern A. Zeeb 		}
279*ebacd801SBjoern A. Zeeb 		if (ps.count <= min_count)
280*ebacd801SBjoern A. Zeeb 			/* did not reach minimum count, drop sequence */
281*ebacd801SBjoern A. Zeeb 			continue;
282*ebacd801SBjoern A. Zeeb 
283*ebacd801SBjoern A. Zeeb 		/* this is a valid one, add it */
284*ebacd801SBjoern A. Zeeb 		ps.deadline_ts = ps.first_ts + ps.dur;
285*ebacd801SBjoern A. Zeeb 		new_ps = pool_get_pseq_elem();
286*ebacd801SBjoern A. Zeeb 		if (new_ps == NULL) {
287*ebacd801SBjoern A. Zeeb 			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
288*ebacd801SBjoern A. Zeeb 			if (new_ps == NULL) {
289*ebacd801SBjoern A. Zeeb 				DFS_POOL_STAT_INC(pseq_alloc_error);
290*ebacd801SBjoern A. Zeeb 				return false;
291*ebacd801SBjoern A. Zeeb 			}
292*ebacd801SBjoern A. Zeeb 			DFS_POOL_STAT_INC(pseq_allocated);
293*ebacd801SBjoern A. Zeeb 			DFS_POOL_STAT_INC(pseq_used);
294*ebacd801SBjoern A. Zeeb 		}
295*ebacd801SBjoern A. Zeeb 		memcpy(new_ps, &ps, sizeof(ps));
296*ebacd801SBjoern A. Zeeb 		INIT_LIST_HEAD(&new_ps->head);
297*ebacd801SBjoern A. Zeeb 		list_add(&new_ps->head, &pde->sequences);
298*ebacd801SBjoern A. Zeeb 	}
299*ebacd801SBjoern A. Zeeb 	return true;
300*ebacd801SBjoern A. Zeeb }
301*ebacd801SBjoern A. Zeeb 
302*ebacd801SBjoern A. Zeeb /* check new ts and add to all matching existing sequences */
303*ebacd801SBjoern A. Zeeb static u32
pseq_handler_add_to_existing_seqs(struct pri_detector * pde,u64 ts)304*ebacd801SBjoern A. Zeeb pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
305*ebacd801SBjoern A. Zeeb {
306*ebacd801SBjoern A. Zeeb 	u32 max_count = 0;
307*ebacd801SBjoern A. Zeeb 	struct pri_sequence *ps, *ps2;
308*ebacd801SBjoern A. Zeeb 	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
309*ebacd801SBjoern A. Zeeb 		u32 delta_ts;
310*ebacd801SBjoern A. Zeeb 		u32 factor;
311*ebacd801SBjoern A. Zeeb 
312*ebacd801SBjoern A. Zeeb 		/* first ensure that sequence is within window */
313*ebacd801SBjoern A. Zeeb 		if (ts > ps->deadline_ts) {
314*ebacd801SBjoern A. Zeeb 			list_del_init(&ps->head);
315*ebacd801SBjoern A. Zeeb 			pool_put_pseq_elem(ps);
316*ebacd801SBjoern A. Zeeb 			continue;
317*ebacd801SBjoern A. Zeeb 		}
318*ebacd801SBjoern A. Zeeb 
319*ebacd801SBjoern A. Zeeb 		delta_ts = ts - ps->last_ts;
320*ebacd801SBjoern A. Zeeb 		factor = pde_get_multiple(delta_ts, ps->pri,
321*ebacd801SBjoern A. Zeeb 					  pde->rs->max_pri_tolerance);
322*ebacd801SBjoern A. Zeeb 		if (factor > 0) {
323*ebacd801SBjoern A. Zeeb 			ps->last_ts = ts;
324*ebacd801SBjoern A. Zeeb 			ps->count++;
325*ebacd801SBjoern A. Zeeb 
326*ebacd801SBjoern A. Zeeb 			if (max_count < ps->count)
327*ebacd801SBjoern A. Zeeb 				max_count = ps->count;
328*ebacd801SBjoern A. Zeeb 		} else {
329*ebacd801SBjoern A. Zeeb 			ps->count_falses++;
330*ebacd801SBjoern A. Zeeb 		}
331*ebacd801SBjoern A. Zeeb 	}
332*ebacd801SBjoern A. Zeeb 	return max_count;
333*ebacd801SBjoern A. Zeeb }
334*ebacd801SBjoern A. Zeeb 
335*ebacd801SBjoern A. Zeeb static struct pri_sequence *
pseq_handler_check_detection(struct pri_detector * pde)336*ebacd801SBjoern A. Zeeb pseq_handler_check_detection(struct pri_detector *pde)
337*ebacd801SBjoern A. Zeeb {
338*ebacd801SBjoern A. Zeeb 	struct pri_sequence *ps;
339*ebacd801SBjoern A. Zeeb 
340*ebacd801SBjoern A. Zeeb 	if (list_empty(&pde->sequences))
341*ebacd801SBjoern A. Zeeb 		return NULL;
342*ebacd801SBjoern A. Zeeb 
343*ebacd801SBjoern A. Zeeb 	list_for_each_entry(ps, &pde->sequences, head) {
344*ebacd801SBjoern A. Zeeb 		/*
345*ebacd801SBjoern A. Zeeb 		 * we assume to have enough matching confidence if we
346*ebacd801SBjoern A. Zeeb 		 * 1) have enough pulses
347*ebacd801SBjoern A. Zeeb 		 * 2) have more matching than false pulses
348*ebacd801SBjoern A. Zeeb 		 */
349*ebacd801SBjoern A. Zeeb 		if ((ps->count >= pde->rs->ppb_thresh) &&
350*ebacd801SBjoern A. Zeeb 		    (ps->count * pde->rs->num_pri >= ps->count_falses))
351*ebacd801SBjoern A. Zeeb 			return ps;
352*ebacd801SBjoern A. Zeeb 	}
353*ebacd801SBjoern A. Zeeb 	return NULL;
354*ebacd801SBjoern A. Zeeb }
355*ebacd801SBjoern A. Zeeb 
356*ebacd801SBjoern A. Zeeb 
357*ebacd801SBjoern A. Zeeb /* free pulse queue and sequences list and give objects back to pools */
pri_detector_reset(struct pri_detector * pde,u64 ts)358*ebacd801SBjoern A. Zeeb static void pri_detector_reset(struct pri_detector *pde, u64 ts)
359*ebacd801SBjoern A. Zeeb {
360*ebacd801SBjoern A. Zeeb 	struct pri_sequence *ps, *ps0;
361*ebacd801SBjoern A. Zeeb 	struct pulse_elem *p, *p0;
362*ebacd801SBjoern A. Zeeb 	list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
363*ebacd801SBjoern A. Zeeb 		list_del_init(&ps->head);
364*ebacd801SBjoern A. Zeeb 		pool_put_pseq_elem(ps);
365*ebacd801SBjoern A. Zeeb 	}
366*ebacd801SBjoern A. Zeeb 	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
367*ebacd801SBjoern A. Zeeb 		list_del_init(&p->head);
368*ebacd801SBjoern A. Zeeb 		pool_put_pulse_elem(p);
369*ebacd801SBjoern A. Zeeb 	}
370*ebacd801SBjoern A. Zeeb 	pde->count = 0;
371*ebacd801SBjoern A. Zeeb 	pde->last_ts = ts;
372*ebacd801SBjoern A. Zeeb }
373*ebacd801SBjoern A. Zeeb 
/* Tear down a detector instance and drop its singleton-pool reference. */
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
380*ebacd801SBjoern A. Zeeb 
pri_detector_add_pulse(struct pri_detector * de,struct pulse_event * event)381*ebacd801SBjoern A. Zeeb static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
382*ebacd801SBjoern A. Zeeb 						   struct pulse_event *event)
383*ebacd801SBjoern A. Zeeb {
384*ebacd801SBjoern A. Zeeb 	u32 max_updated_seq;
385*ebacd801SBjoern A. Zeeb 	struct pri_sequence *ps;
386*ebacd801SBjoern A. Zeeb 	u64 ts = event->ts;
387*ebacd801SBjoern A. Zeeb 	const struct radar_detector_specs *rs = de->rs;
388*ebacd801SBjoern A. Zeeb 
389*ebacd801SBjoern A. Zeeb 	/* ignore pulses not within width range */
390*ebacd801SBjoern A. Zeeb 	if ((rs->width_min > event->width) || (rs->width_max < event->width))
391*ebacd801SBjoern A. Zeeb 		return NULL;
392*ebacd801SBjoern A. Zeeb 
393*ebacd801SBjoern A. Zeeb 	if ((ts - de->last_ts) < rs->max_pri_tolerance)
394*ebacd801SBjoern A. Zeeb 		/* if delta to last pulse is too short, don't use this pulse */
395*ebacd801SBjoern A. Zeeb 		return NULL;
396*ebacd801SBjoern A. Zeeb 	/* radar detector spec needs chirp, but not detected */
397*ebacd801SBjoern A. Zeeb 	if (rs->chirp && rs->chirp != event->chirp)
398*ebacd801SBjoern A. Zeeb 		return NULL;
399*ebacd801SBjoern A. Zeeb 
400*ebacd801SBjoern A. Zeeb 	de->last_ts = ts;
401*ebacd801SBjoern A. Zeeb 
402*ebacd801SBjoern A. Zeeb 	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
403*ebacd801SBjoern A. Zeeb 
404*ebacd801SBjoern A. Zeeb 	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
405*ebacd801SBjoern A. Zeeb 		pri_detector_reset(de, ts);
406*ebacd801SBjoern A. Zeeb 		return NULL;
407*ebacd801SBjoern A. Zeeb 	}
408*ebacd801SBjoern A. Zeeb 
409*ebacd801SBjoern A. Zeeb 	ps = pseq_handler_check_detection(de);
410*ebacd801SBjoern A. Zeeb 
411*ebacd801SBjoern A. Zeeb 	if (ps == NULL)
412*ebacd801SBjoern A. Zeeb 		pulse_queue_enqueue(de, ts);
413*ebacd801SBjoern A. Zeeb 
414*ebacd801SBjoern A. Zeeb 	return ps;
415*ebacd801SBjoern A. Zeeb }
416*ebacd801SBjoern A. Zeeb 
pri_detector_init(const struct radar_detector_specs * rs)417*ebacd801SBjoern A. Zeeb struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
418*ebacd801SBjoern A. Zeeb {
419*ebacd801SBjoern A. Zeeb 	struct pri_detector *de;
420*ebacd801SBjoern A. Zeeb 
421*ebacd801SBjoern A. Zeeb 	de = kzalloc(sizeof(*de), GFP_ATOMIC);
422*ebacd801SBjoern A. Zeeb 	if (de == NULL)
423*ebacd801SBjoern A. Zeeb 		return NULL;
424*ebacd801SBjoern A. Zeeb 	de->exit = pri_detector_exit;
425*ebacd801SBjoern A. Zeeb 	de->add_pulse = pri_detector_add_pulse;
426*ebacd801SBjoern A. Zeeb 	de->reset = pri_detector_reset;
427*ebacd801SBjoern A. Zeeb 
428*ebacd801SBjoern A. Zeeb 	INIT_LIST_HEAD(&de->sequences);
429*ebacd801SBjoern A. Zeeb 	INIT_LIST_HEAD(&de->pulses);
430*ebacd801SBjoern A. Zeeb 	de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
431*ebacd801SBjoern A. Zeeb 	de->max_count = rs->ppb * 2;
432*ebacd801SBjoern A. Zeeb 	de->rs = rs;
433*ebacd801SBjoern A. Zeeb 
434*ebacd801SBjoern A. Zeeb 	pool_register_ref();
435*ebacd801SBjoern A. Zeeb 	return de;
436*ebacd801SBjoern A. Zeeb }
437