xref: /linux/drivers/counter/counter-chrdev.c (revision cdd30ebb1b9f36159d66f088b61aee264e649d7a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic Counter character device interface
4  * Copyright (C) 2020 William Breathitt Gray
5  */
6 #include <linux/cdev.h>
7 #include <linux/counter.h>
8 #include <linux/err.h>
9 #include <linux/errno.h>
10 #include <linux/export.h>
11 #include <linux/fs.h>
12 #include <linux/kfifo.h>
13 #include <linux/list.h>
14 #include <linux/mutex.h>
15 #include <linux/nospec.h>
16 #include <linux/poll.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/timekeeping.h>
20 #include <linux/types.h>
21 #include <linux/uaccess.h>
22 #include <linux/wait.h>
23 
24 #include "counter-chrdev.h"
25 
/*
 * struct counter_comp_node - container for a single watched component
 * @l:		list node for membership in an event node's comp_list
 * @component:	userspace-visible component info copied from the watch
 * @comp:	component data, including the read callback used to
 *		service the watch
 * @parent:	pointer to the parent counter_signal or counter_count;
 *		left NULL for device-scope components
 */
struct counter_comp_node {
	struct list_head l;
	struct counter_component component;
	struct counter_comp comp;
	void *parent;
};
32 
/*
 * counter_comp_read_is_equal() - test whether two components reference
 * the same read callback
 * @a: first counter_comp operand
 * @b: second counter_comp operand
 *
 * Every read-callback variant is compared so the check works no matter
 * which callback member a component populates. Parameters are
 * parenthesized so the macro stays safe if callers ever pass compound
 * expressions.
 */
#define counter_comp_read_is_equal(a, b) \
	((a).action_read == (b).action_read || \
	(a).device_u8_read == (b).device_u8_read || \
	(a).count_u8_read == (b).count_u8_read || \
	(a).signal_u8_read == (b).signal_u8_read || \
	(a).device_u32_read == (b).device_u32_read || \
	(a).count_u32_read == (b).count_u32_read || \
	(a).signal_u32_read == (b).signal_u32_read || \
	(a).device_u64_read == (b).device_u64_read || \
	(a).count_u64_read == (b).count_u64_read || \
	(a).signal_u64_read == (b).signal_u64_read || \
	(a).signal_array_u32_read == (b).signal_array_u32_read || \
	(a).device_array_u64_read == (b).device_array_u64_read || \
	(a).count_array_u64_read == (b).count_array_u64_read || \
	(a).signal_array_u64_read == (b).signal_array_u64_read)
48 
/*
 * counter_comp_read_is_set() - test whether a component has any read
 * callback populated
 * @comp: counter_comp operand to inspect
 *
 * Evaluates true if at least one read-callback variant is non-NULL.
 * The parameter is parenthesized so the macro stays safe if callers
 * ever pass a compound expression.
 */
#define counter_comp_read_is_set(comp) \
	((comp).action_read || \
	(comp).device_u8_read || \
	(comp).count_u8_read || \
	(comp).signal_u8_read || \
	(comp).device_u32_read || \
	(comp).count_u32_read || \
	(comp).signal_u32_read || \
	(comp).device_u64_read || \
	(comp).count_u64_read || \
	(comp).signal_u64_read || \
	(comp).signal_array_u32_read || \
	(comp).device_array_u64_read || \
	(comp).count_array_u64_read || \
	(comp).signal_array_u64_read)
64 
/*
 * counter_chrdev_read - copy queued Counter events to userspace
 * @filp:	file pointer for the character device
 * @buf:	userspace destination buffer
 * @len:	size of @buf in bytes
 * @f_ps:	file position (unused)
 *
 * Blocks (unless O_NONBLOCK is set) until at least one event is queued,
 * then copies as many whole struct counter_event records as fit in @buf.
 * Returns the number of bytes copied or a negative error code.
 */
static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
				   size_t len, loff_t *f_ps)
{
	struct counter_device *const counter = filp->private_data;
	int err;
	unsigned int copied;

	/* The parent driver may have unregistered while the file was open */
	if (!counter->ops)
		return -ENODEV;

	/* Refuse buffers too small to hold even a single event record */
	if (len < sizeof(struct counter_event))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&counter->events)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			/* Sleep until an event arrives or the device goes away */
			err = wait_event_interruptible(counter->events_wait,
					!kfifo_is_empty(&counter->events) ||
					!counter->ops);
			if (err < 0)
				return err;
			if (!counter->ops)
				return -ENODEV;
		}

		/* Serialize concurrent readers draining the FIFO */
		if (mutex_lock_interruptible(&counter->events_out_lock))
			return -ERESTARTSYS;
		err = kfifo_to_user(&counter->events, buf, len, &copied);
		mutex_unlock(&counter->events_out_lock);
		if (err < 0)
			return err;
		/* Loop again if another reader raced us to the data */
	} while (!copied);

	return copied;
}
102 
counter_chrdev_poll(struct file * filp,struct poll_table_struct * pollt)103 static __poll_t counter_chrdev_poll(struct file *filp,
104 				    struct poll_table_struct *pollt)
105 {
106 	struct counter_device *const counter = filp->private_data;
107 	__poll_t events = 0;
108 
109 	if (!counter->ops)
110 		return events;
111 
112 	poll_wait(filp, &counter->events_wait, pollt);
113 
114 	if (!kfifo_is_empty(&counter->events))
115 		events = EPOLLIN | EPOLLRDNORM;
116 
117 	return events;
118 }
119 
counter_events_list_free(struct list_head * const events_list)120 static void counter_events_list_free(struct list_head *const events_list)
121 {
122 	struct counter_event_node *p, *n;
123 	struct counter_comp_node *q, *o;
124 
125 	list_for_each_entry_safe(p, n, events_list, l) {
126 		/* Free associated component nodes */
127 		list_for_each_entry_safe(q, o, &p->comp_list, l) {
128 			list_del(&q->l);
129 			kfree(q);
130 		}
131 
132 		/* Free event node */
133 		list_del(&p->l);
134 		kfree(p);
135 	}
136 }
137 
/*
 * counter_set_event_node - register a component watch on the pending list
 * @counter:	pointer to the Counter device
 * @watch:	watch identifying the event and channel
 * @cfg:	component node template describing what to read
 *
 * Finds (or allocates) the event node for @watch on
 * counter->next_events_list and appends a copy of @cfg to its component
 * list. Returns 0 on success, -EINVAL if an identical component watch is
 * already registered for this event, or -ENOMEM on allocation failure.
 *
 * NOTE(review): callers appear to hold counter->n_events_list_lock around
 * this function (see counter_add_watch) — confirm before adding new call
 * sites.
 */
static int counter_set_event_node(struct counter_device *const counter,
				  struct counter_watch *const watch,
				  const struct counter_comp_node *const cfg)
{
	struct counter_event_node *event_node;
	int err = 0;
	struct counter_comp_node *comp_node;

	/* Search for event in the list */
	list_for_each_entry(event_node, &counter->next_events_list, l)
		if (event_node->event == watch->event &&
		    event_node->channel == watch->channel)
			break;

	/* If event is not already in the list */
	if (&event_node->l == &counter->next_events_list) {
		/* Allocate new event node */
		event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
		if (!event_node)
			return -ENOMEM;

		/* Configure event node and add to the list */
		event_node->event = watch->event;
		event_node->channel = watch->channel;
		INIT_LIST_HEAD(&event_node->comp_list);
		list_add(&event_node->l, &counter->next_events_list);
	}

	/* Check if component watch has already been set before */
	list_for_each_entry(comp_node, &event_node->comp_list, l)
		if (comp_node->parent == cfg->parent &&
		    counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
			err = -EINVAL;
			goto exit_free_event_node;
		}

	/* Allocate component node */
	comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
	if (!comp_node) {
		err = -ENOMEM;
		goto exit_free_event_node;
	}
	*comp_node = *cfg;

	/* Add component node to event node */
	list_add_tail(&comp_node->l, &event_node->comp_list);

exit_free_event_node:
	/*
	 * Free event node if no one else is watching; this also rolls back
	 * the allocation above when an error path is taken before a
	 * component node could be attached.
	 */
	if (list_empty(&event_node->comp_list)) {
		list_del(&event_node->l);
		kfree(event_node);
	}

	return err;
}
194 
/*
 * counter_enable_events - activate the pending watch configuration
 * @counter: pointer to the Counter device
 *
 * Frees the currently active events list, swaps in next_events_list, and
 * asks the driver to apply the configuration via the optional
 * events_configure callback. Returns 0 or the callback's error code.
 *
 * Lock order: n_events_list_lock (mutex) is taken before
 * events_list_lock (irqsave spinlock); counter_disable_events() must not
 * invert this ordering.
 */
static int counter_enable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	mutex_lock(&counter->n_events_list_lock);
	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);
	list_replace_init(&counter->next_events_list,
			  &counter->events_list);

	/* events_configure is optional for drivers */
	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
215 
/*
 * counter_disable_events - drop all active and pending watches
 * @counter: pointer to the Counter device
 *
 * Empties the active events list under events_list_lock and notifies the
 * driver via the optional events_configure callback, then empties the
 * pending next_events_list under n_events_list_lock. The two lists are
 * protected by separate locks, so they are cleared in separate critical
 * sections. Returns 0 or the callback's error code.
 */
static int counter_disable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);

	/* Tell the driver no events are configured anymore */
	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	mutex_lock(&counter->n_events_list_lock);

	counter_events_list_free(&counter->next_events_list);

	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
238 
/*
 * counter_get_ext - locate an extension component by component id
 * @ext:		extension component array to search
 * @num_ext:		number of elements in @ext
 * @component_id:	component id requested by userspace
 * @ext_idx:		on success, index into @ext of the matching element
 * @id:			on success, first component id consumed by that element
 *
 * Scalar extensions consume one component id each; COUNTER_COMP_ARRAY
 * extensions consume a contiguous range of element->length ids. Returns
 * 0 on success or -EINVAL if @component_id is beyond the last extension.
 */
static int counter_get_ext(const struct counter_comp *const ext,
			   const size_t num_ext, const size_t component_id,
			   size_t *const ext_idx, size_t *const id)
{
	struct counter_array *element;

	*id = 0;
	for (*ext_idx = 0; *ext_idx < num_ext; (*ext_idx)++) {
		if (*id == component_id)
			return 0;

		if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
			element = ext[*ext_idx].priv;

			/* Array elements map to a contiguous id range */
			if (component_id - *id < element->length)
				return 0;

			*id += element->length;
		} else {
			(*id)++;
		}
	}

	return -EINVAL;
}
263 
/*
 * counter_add_watch - handle the COUNTER_ADD_WATCH_IOCTL request
 * @counter:	pointer to the Counter device
 * @arg:	userspace pointer to a struct counter_watch
 *
 * Validates the watch supplied by userspace, resolves the component it
 * refers to (device/signal/count extension, signal level, count value,
 * function, or synapse action), and registers it on the pending events
 * list via counter_set_event_node(). Returns 0 on success or a negative
 * error code (-EFAULT, -EINVAL, -EOPNOTSUPP, -ENOMEM, or a driver
 * watch_validate error).
 */
static int counter_add_watch(struct counter_device *const counter,
			     const unsigned long arg)
{
	void __user *const uwatch = (void __user *)arg;
	struct counter_watch watch;
	struct counter_comp_node comp_node = {};
	size_t parent, id;
	struct counter_comp *ext;
	size_t num_ext;
	size_t ext_idx, ext_id;
	int err = 0;

	if (copy_from_user(&watch, uwatch, sizeof(watch)))
		return -EFAULT;

	/* A component-less watch skips component validation entirely */
	if (watch.component.type == COUNTER_COMPONENT_NONE)
		goto no_component;

	parent = watch.component.parent;

	/* Configure parent component info for comp node */
	switch (watch.component.scope) {
	case COUNTER_SCOPE_DEVICE:
		ext = counter->ext;
		num_ext = counter->num_ext;
		break;
	case COUNTER_SCOPE_SIGNAL:
		if (parent >= counter->num_signals)
			return -EINVAL;
		/* Clamp index against speculative out-of-bounds access */
		parent = array_index_nospec(parent, counter->num_signals);

		comp_node.parent = counter->signals + parent;

		ext = counter->signals[parent].ext;
		num_ext = counter->signals[parent].num_ext;
		break;
	case COUNTER_SCOPE_COUNT:
		if (parent >= counter->num_counts)
			return -EINVAL;
		/* Clamp index against speculative out-of-bounds access */
		parent = array_index_nospec(parent, counter->num_counts);

		comp_node.parent = counter->counts + parent;

		ext = counter->counts[parent].ext;
		num_ext = counter->counts[parent].num_ext;
		break;
	default:
		return -EINVAL;
	}

	id = watch.component.id;

	/* Configure component info for comp node */
	switch (watch.component.type) {
	case COUNTER_COMPONENT_SIGNAL:
		if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
		comp_node.comp.signal_u32_read = counter->ops->signal_read;
		break;
	case COUNTER_COMPONENT_COUNT:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_U64;
		comp_node.comp.count_u64_read = counter->ops->count_read;
		break;
	case COUNTER_COMPONENT_FUNCTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_FUNCTION;
		comp_node.comp.count_u32_read = counter->ops->function_read;
		break;
	case COUNTER_COMPONENT_SYNAPSE_ACTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;
		if (id >= counter->counts[parent].num_synapses)
			return -EINVAL;
		id = array_index_nospec(id, counter->counts[parent].num_synapses);

		comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
		comp_node.comp.action_read = counter->ops->action_read;
		comp_node.comp.priv = counter->counts[parent].synapses + id;
		break;
	case COUNTER_COMPONENT_EXTENSION:
		err = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
		if (err < 0)
			return err;

		comp_node.comp = ext[ext_idx];
		break;
	default:
		return -EINVAL;
	}
	/* A watch is useless if the component has no read callback */
	if (!counter_comp_read_is_set(comp_node.comp))
		return -EOPNOTSUPP;

no_component:
	mutex_lock(&counter->n_events_list_lock);

	/* Give the driver a chance to reject unsupported watches */
	if (counter->ops->watch_validate) {
		err = counter->ops->watch_validate(counter, &watch);
		if (err < 0)
			goto err_exit;
	}

	comp_node.component = watch.component;

	err = counter_set_event_node(counter, &watch, &comp_node);

err_exit:
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
381 
counter_chrdev_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)382 static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
383 				 unsigned long arg)
384 {
385 	struct counter_device *const counter = filp->private_data;
386 	int ret = -ENODEV;
387 
388 	mutex_lock(&counter->ops_exist_lock);
389 
390 	if (!counter->ops)
391 		goto out_unlock;
392 
393 	switch (cmd) {
394 	case COUNTER_ADD_WATCH_IOCTL:
395 		ret = counter_add_watch(counter, arg);
396 		break;
397 	case COUNTER_ENABLE_EVENTS_IOCTL:
398 		ret = counter_enable_events(counter);
399 		break;
400 	case COUNTER_DISABLE_EVENTS_IOCTL:
401 		ret = counter_disable_events(counter);
402 		break;
403 	default:
404 		ret = -ENOIOCTLCMD;
405 		break;
406 	}
407 
408 out_unlock:
409 	mutex_unlock(&counter->ops_exist_lock);
410 
411 	return ret;
412 }
413 
counter_chrdev_open(struct inode * inode,struct file * filp)414 static int counter_chrdev_open(struct inode *inode, struct file *filp)
415 {
416 	struct counter_device *const counter = container_of(inode->i_cdev,
417 							    typeof(*counter),
418 							    chrdev);
419 
420 	get_device(&counter->dev);
421 	filp->private_data = counter;
422 
423 	return nonseekable_open(inode, filp);
424 }
425 
/*
 * counter_chrdev_release - release handler for the Counter character device
 * @inode:	inode backing the character device
 * @filp:	file pointer being released
 *
 * Disables all configured events and drops the device reference taken at
 * open time. If the parent driver already unregistered, any lingering
 * event lists are freed directly and -ENODEV is returned.
 */
static int counter_chrdev_release(struct inode *inode, struct file *filp)
{
	struct counter_device *const counter = filp->private_data;
	int ret = 0;

	mutex_lock(&counter->ops_exist_lock);

	if (!counter->ops) {
		/* Free any lingering held memory */
		counter_events_list_free(&counter->events_list);
		counter_events_list_free(&counter->next_events_list);
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = counter_disable_events(counter);
	if (ret < 0) {
		/*
		 * NOTE(review): this early return skips put_device(), keeping
		 * the device pinned when disabling events fails — presumably
		 * to avoid freeing a device with events still configured, but
		 * it looks like a reference leak; confirm intent.
		 */
		mutex_unlock(&counter->ops_exist_lock);
		return ret;
	}

out_unlock:
	mutex_unlock(&counter->ops_exist_lock);

	/* Drop the reference taken in counter_chrdev_open() */
	put_device(&counter->dev);

	return ret;
}
454 
/* File operations backing the Counter events character device */
static const struct file_operations counter_fops = {
	.owner = THIS_MODULE,
	.read = counter_chrdev_read,
	.poll = counter_chrdev_poll,
	.unlocked_ioctl = counter_chrdev_ioctl,
	.open = counter_chrdev_open,
	.release = counter_chrdev_release,
};
463 
counter_chrdev_add(struct counter_device * const counter)464 int counter_chrdev_add(struct counter_device *const counter)
465 {
466 	/* Initialize Counter events lists */
467 	INIT_LIST_HEAD(&counter->events_list);
468 	INIT_LIST_HEAD(&counter->next_events_list);
469 	spin_lock_init(&counter->events_list_lock);
470 	mutex_init(&counter->n_events_list_lock);
471 	init_waitqueue_head(&counter->events_wait);
472 	spin_lock_init(&counter->events_in_lock);
473 	mutex_init(&counter->events_out_lock);
474 
475 	/* Initialize character device */
476 	cdev_init(&counter->chrdev, &counter_fops);
477 
478 	/* Allocate Counter events queue */
479 	return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
480 }
481 
/*
 * counter_chrdev_remove - tear down the Counter character device state
 * @counter: pointer to the Counter device
 *
 * Releases the events kfifo allocated by counter_chrdev_add().
 */
void counter_chrdev_remove(struct counter_device *const counter)
{
	kfifo_free(&counter->events);
}
486 
/*
 * counter_get_array_data - read one element of an array extension
 * @counter:	pointer to the Counter device
 * @scope:	scope of the component (device, signal, or count)
 * @parent:	parent signal/count pointer; unused for device scope
 * @comp:	array component whose priv holds the counter_array element
 * @idx:	index of the array element to read
 * @value:	destination for the value read (u32 results are widened)
 *
 * Dispatches to the array read callback matching the element type and
 * scope. Returns the callback's result, or -EINVAL for an unsupported
 * type/scope combination.
 */
static int counter_get_array_data(struct counter_device *const counter,
				  const enum counter_scope scope,
				  void *const parent,
				  const struct counter_comp *const comp,
				  const size_t idx, u64 *const value)
{
	const struct counter_array *const element = comp->priv;
	u32 value_u32 = 0;
	int ret;

	switch (element->type) {
	case COUNTER_COMP_SIGNAL_POLARITY:
		/* Polarity arrays only exist at signal scope */
		if (scope != COUNTER_SCOPE_SIGNAL)
			return -EINVAL;
		ret = comp->signal_array_u32_read(counter, parent, idx,
						  &value_u32);
		*value = value_u32;
		return ret;
	case COUNTER_COMP_U64:
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			return comp->device_array_u64_read(counter, idx, value);
		case COUNTER_SCOPE_SIGNAL:
			return comp->signal_array_u64_read(counter, parent, idx,
							   value);
		case COUNTER_SCOPE_COUNT:
			return comp->count_array_u64_read(counter, parent, idx,
							  value);
		default:
			return -EINVAL;
		}
	default:
		return -EINVAL;
	}
}
522 
/*
 * counter_get_data - read the current value of a watched component
 * @counter:	pointer to the Counter device
 * @comp_node:	component node identifying what to read
 * @value:	destination for the value read (narrower results widened)
 *
 * Dispatches to the read callback matching the component's type and
 * scope. COUNTER_COMPONENT_NONE watches report no data and return 0
 * immediately. Array components are resolved back to their extension
 * entry via counter_get_ext() before delegating to
 * counter_get_array_data(). Returns the callback's result or -EINVAL
 * for an unsupported type/scope combination.
 */
static int counter_get_data(struct counter_device *const counter,
			    const struct counter_comp_node *const comp_node,
			    u64 *const value)
{
	const struct counter_comp *const comp = &comp_node->comp;
	const enum counter_scope scope = comp_node->component.scope;
	const size_t id = comp_node->component.id;
	/* parent is a signal or a count depending on scope; alias both */
	struct counter_signal *const signal = comp_node->parent;
	struct counter_count *const count = comp_node->parent;
	u8 value_u8 = 0;
	u32 value_u32 = 0;
	const struct counter_comp *ext;
	size_t num_ext;
	size_t ext_idx, ext_id;
	int ret;

	if (comp_node->component.type == COUNTER_COMPONENT_NONE)
		return 0;

	switch (comp->type) {
	case COUNTER_COMP_U8:
	case COUNTER_COMP_BOOL:
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u8_read(counter, &value_u8);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u8_read(counter, signal, &value_u8);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u8_read(counter, count, &value_u8);
			break;
		default:
			return -EINVAL;
		}
		*value = value_u8;
		return ret;
	case COUNTER_COMP_SIGNAL_LEVEL:
	case COUNTER_COMP_FUNCTION:
	case COUNTER_COMP_ENUM:
	case COUNTER_COMP_COUNT_DIRECTION:
	case COUNTER_COMP_COUNT_MODE:
	case COUNTER_COMP_SIGNAL_POLARITY:
		/* All enum-like components read through the u32 callbacks */
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u32_read(counter, &value_u32);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u32_read(counter, signal,
						    &value_u32);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u32_read(counter, count, &value_u32);
			break;
		default:
			return -EINVAL;
		}
		*value = value_u32;
		return ret;
	case COUNTER_COMP_U64:
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			return comp->device_u64_read(counter, value);
		case COUNTER_SCOPE_SIGNAL:
			return comp->signal_u64_read(counter, signal, value);
		case COUNTER_SCOPE_COUNT:
			return comp->count_u64_read(counter, count, value);
		default:
			return -EINVAL;
		}
	case COUNTER_COMP_SYNAPSE_ACTION:
		/* comp->priv holds the synapse set up in counter_add_watch() */
		ret = comp->action_read(counter, count, comp->priv, &value_u32);
		*value = value_u32;
		return ret;
	case COUNTER_COMP_ARRAY:
		/* Select the extension array for this component's scope */
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			ext = counter->ext;
			num_ext = counter->num_ext;
			break;
		case COUNTER_SCOPE_SIGNAL:
			ext = signal->ext;
			num_ext = signal->num_ext;
			break;
		case COUNTER_SCOPE_COUNT:
			ext = count->ext;
			num_ext = count->num_ext;
			break;
		default:
			return -EINVAL;
		}
		ret = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
		if (ret < 0)
			return ret;

		/* id - ext_id is the element index within the array */
		return counter_get_array_data(counter, scope, comp_node->parent,
					      comp, id - ext_id, value);
	default:
		return -EINVAL;
	}
}
624 
/**
 * counter_push_event - queue event for userspace reading
 * @counter:	pointer to Counter structure
 * @event:	triggered event
 * @channel:	event channel
 *
 * Note: If no one is watching for the respective event, it is silently
 * discarded.
 */
void counter_push_event(struct counter_device *const counter, const u8 event,
			const u8 channel)
{
	struct counter_event ev;
	unsigned int copied = 0;
	unsigned long flags;
	struct counter_event_node *event_node;
	struct counter_comp_node *comp_node;

	/* Timestamp once so every component record shares it */
	ev.timestamp = ktime_get_ns();
	ev.watch.event = event;
	ev.watch.channel = channel;

	/* Could be in an interrupt context, so use a spin lock */
	spin_lock_irqsave(&counter->events_list_lock, flags);

	/* Search for event in the list */
	list_for_each_entry(event_node, &counter->events_list, l)
		if (event_node->event == event &&
		    event_node->channel == channel)
			break;

	/* If event is not in the list */
	if (&event_node->l == &counter->events_list)
		goto exit_early;

	/* Read and queue relevant comp for userspace */
	list_for_each_entry(comp_node, &event_node->comp_list, l) {
		ev.watch.component = comp_node->component;
		/* status carries the read error as a positive errno value */
		ev.status = -counter_get_data(counter, comp_node, &ev.value);

		copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
							1, &counter->events_in_lock);
	}

exit_early:
	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	/* Wake sleeping readers only if something was actually queued */
	if (copied)
		wake_up_poll(&counter->events_wait, EPOLLIN);
}
EXPORT_SYMBOL_NS_GPL(counter_push_event, "COUNTER");
676