// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Event Management Driver
 *
 *  Copyright (C) 2021 Xilinx, Inc.
 *  Copyright (C) 2024 Advanced Micro Devices, Inc.
 *
 *  Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
 */

#include <linux/cpuhotplug.h>
#include <linux/firmware/xlnx-event-manager.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number);

static int virq_sgi;
static int event_manager_availability = -EACCES;

/* SGI number used by the Event Management driver */
#define XLNX_EVENT_SGI_NUM	(15)

/* Max number of drivers that can register for the same event */
#define MAX_DRIVER_PER_EVENT	(10U)

/* Max hash table order for the registered driver map (1 << 7 = 128 buckets) */
#define REGISTERED_DRIVER_MAX_ORDER	(7)

#define MAX_BITS	(32U) /* Number of bits available in the error mask */

#define REGISTER_NOTIFIER_FIRMWARE_VERSION	(2U)

static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
static int sgi_num = XLNX_EVENT_SGI_NUM;

static bool is_need_to_unregister;

/**
 * struct agent_cb - Registered callback function and private data.
 * @agent_data:		Data passed back to the handler function.
 * @eve_cb:		Function pointer to store the callback function.
 * @list:		Member used to link this callback into a list.
 */
struct agent_cb {
	void *agent_data;
	event_cb_func_t eve_cb;
	struct list_head list;
};

/**
 * struct registered_event_data - Registered Event Data.
 * @key:		Combined ID (Node-Id | Event-Id) of type u64, with the
 *			upper 32 bits holding the Node-Id and the lower 32 bits
 *			the Event-Id. Used as the key to index into the hashmap.
 * @cb_type:		Type of API callback, like PM_NOTIFY_CB, etc.
 * @wake:		If this flag is set, the firmware will wake up the
 *			processor if it is in sleep or power-down state.
 * @cb_list_head:	Head of the callback data list, which contains the
 *			information about registered handlers and private data.
 * @hentry:		hlist_node that hooks this entry into the hashtable.
 */
struct registered_event_data {
	u64 key;
	enum pm_api_cb_id cb_type;
	bool wake;
	struct list_head cb_list_head;
	struct hlist_node hentry;
};
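
/*
 * Illustration (hypothetical values): a registration for node_id 0x1234 and
 * a single event bit 0x1 is stored under
 *
 *	key = ((u64)0x1234 << 32) | 0x1 = 0x0000123400000001
 *
 * which is the same key later rebuilt from payload[1]/payload[2] when the
 * firmware notification arrives.
 */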

static bool xlnx_is_error_event(const u32 node_id)
{
	u32 pm_family_code, pm_sub_family_code;

	zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);

	if (pm_sub_family_code == VERSAL_SUB_FAMILY_CODE) {
		if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 ||
		    node_id == VERSAL_EVENT_ERROR_PMC_ERR2 ||
		    node_id == VERSAL_EVENT_ERROR_PSM_ERR1 ||
		    node_id == VERSAL_EVENT_ERROR_PSM_ERR2)
			return true;
	} else {
		if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 ||
		    node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 ||
		    node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 ||
		    node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR1 ||
		    node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR2 ||
		    node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR3 ||
		    node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR4)
			return true;
	}

	return false;
}

static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
					event_cb_func_t cb_fun, void *data)
{
	u64 key = 0;
	bool present_in_hash = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	key = ((u64)node_id << 32U) | (u64)event;
	/* Check for an existing entry in the hash table for the given key */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			present_in_hash = true;
			break;
		}
	}

	if (!present_in_hash) {
		/* Add a new entry if not present in the hash table */
		eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
		if (!eve_data)
			return -ENOMEM;
		eve_data->key = key;
		eve_data->cb_type = PM_NOTIFY_CB;
		eve_data->wake = wake;
		INIT_LIST_HEAD(&eve_data->cb_list_head);

		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data) {
			kfree(eve_data);
			return -ENOMEM;
		}
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		/* Add into the callback list */
		list_add(&cb_data->list, &eve_data->cb_list_head);

		/* Add into the hash table */
		hash_add(reg_driver_map, &eve_data->hentry, key);
	} else {
		/* Search for the callback function and private data in the list */
		list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
			if (cb_pos->eve_cb == cb_fun &&
			    cb_pos->agent_data == data) {
				return 0;
			}
		}

		/* Add another handler and its private data to the list */
		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data)
			return -ENOMEM;
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		list_add(&cb_data->list, &eve_data->cb_list_head);
	}

	return 0;
}

static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
{
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;

	/* Check for an existing entry in the hash table for the given cb_type */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			pr_err("Suspend callback is already registered\n");
			return -EINVAL;
		}
	}

	/* Add a new entry if not present */
	eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
	if (!eve_data)
		return -ENOMEM;

	eve_data->key = 0;
	eve_data->cb_type = PM_INIT_SUSPEND_CB;
	INIT_LIST_HEAD(&eve_data->cb_list_head);

	cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
	if (!cb_data) {
		kfree(eve_data);
		return -ENOMEM;
	}
	cb_data->eve_cb = cb_fun;
	cb_data->agent_data = data;

	/* Add into the callback list */
	list_add(&cb_data->list, &eve_data->cb_list_head);

	hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);

	return 0;
}

static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;
	struct hlist_node *tmp;

	is_need_to_unregister = false;

	/* Check for an existing entry in the hash table for the given cb_type */
	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			/* Delete the list of callbacks */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}
			/* Remove the entry from the hashtable */
			hash_del(&eve_data->hentry);
			kfree(eve_data);
			is_need_to_unregister = true;
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for suspend event\n");
		return -EINVAL;
	}

	return 0;
}

static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
					   event_cb_func_t cb_fun, void *data)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u64 key = ((u64)node_id << 32U) | (u64)event;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;
	struct hlist_node *tmp;

	is_need_to_unregister = false;

	/* Check for an existing entry in the hash table for the given key */
	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) {
		if (eve_data->key == key) {
			/* Delete the list of callbacks */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun &&
				    cb_pos->agent_data == data) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}

			/* Remove the hash table entry if the callback list is empty */
			if (list_empty(&eve_data->cb_list_head)) {
				/* Remove the entry from the hash table */
				hash_del(&eve_data->hentry);
				kfree(eve_data);
				is_need_to_unregister = true;
			}
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
			node_id, event);
		return -EINVAL;
	}

	return 0;
}

/**
 * xlnx_register_event() - Register for the event.
 * @cb_type:	Type of callback from pm_api_cb_id,
 *			PM_NOTIFY_CB - for Error Events,
 *			PM_INIT_SUSPEND_CB - for suspend callback.
 * @node_id:	Node-Id related to the event.
 * @event:	Event Mask for the Error Event.
 * @wake:	Flag specifying whether the subsystem should be woken upon
 *		event notification.
 * @cb_fun:	Callback function to be registered.
 * @data:	Pointer to the caller's private data, passed back to @cb_fun.
 *
 * Return:	Returns 0 on successful registration else error code.
 */
int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
			const bool wake, event_cb_func_t cb_fun, void *data)
{
	int ret = 0;
	u32 eve;
	int pos;

	if (event_manager_availability)
		return event_manager_availability;

	if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
		pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
		return -EINVAL;
	}

	if (!cb_fun)
		return -EFAULT;

	if (cb_type == PM_INIT_SUSPEND_CB) {
		ret = xlnx_add_cb_for_suspend(cb_fun, data);
	} else {
		if (!xlnx_is_error_event(node_id)) {
			/* Add an entry for the Node-Id/Event in the hash table */
			ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
		} else {
			/* Add each error bit of the mask into the hash table */
			for (pos = 0; pos < MAX_BITS; pos++) {
				eve = event & (1 << pos);
				if (!eve)
					continue;

				/* Add an entry for the Node-Id/Eve in the hash table */
				ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
								   data);
				/* Break the loop on error */
				if (ret)
					break;
			}
			if (ret) {
				/* Skip the event that caused the error */
				pos--;
				/* Remove events registered during this call from the hash table */
				for ( ; pos >= 0; pos--) {
					eve = event & (1 << pos);
					if (!eve)
						continue;
					xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
				}
			}
		}

		if (ret) {
			pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
			       event, ret);
			return ret;
		}

		/* Register for the Node-Id/Event combination in firmware */
		ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
		if (ret) {
			pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
			       event, ret);
			/* Remove the already registered events from the hash table */
			if (xlnx_is_error_event(node_id)) {
				for (pos = 0; pos < MAX_BITS; pos++) {
					eve = event & (1 << pos);
					if (!eve)
						continue;
					xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
				}
			} else {
				xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
			}
			return ret;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(xlnx_register_event);
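
/*
 * Example usage (illustrative sketch only, not part of this driver): a
 * hypothetical client could register an error-event callback roughly as
 * follows. The node ID and mask bit below are placeholders; real callers
 * pass the error node IDs and mask bits defined for their device in
 * include/linux/firmware/xlnx-zynqmp.h.
 *
 *	static void my_err_cb(const u32 *payload, void *data)
 *	{
 *		// payload[0] = PM_NOTIFY_CB, payload[1] = node ID,
 *		// payload[2] = single error bit (see xlnx_event_handler())
 *		pr_info("error 0x%x on node 0x%x\n", payload[2], payload[1]);
 *	}
 *
 *	int ret = xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
 *				      BIT(0), false, my_err_cb, NULL);
 */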

/**
 * xlnx_unregister_event() - Unregister for the event.
 * @cb_type:	Type of callback from pm_api_cb_id,
 *			PM_NOTIFY_CB - for Error Events,
 *			PM_INIT_SUSPEND_CB - for suspend callback.
 * @node_id:	Node-Id related to the event.
 * @event:	Event Mask for the Error Event.
 * @cb_fun:	Function pointer of the callback function.
 * @data:	Pointer to the agent's private data.
 *
 * Return:	Returns 0 on successful unregistration else error code.
 */
int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
			  event_cb_func_t cb_fun, void *data)
{
	int ret = 0;
	u32 eve, pos;

	is_need_to_unregister = false;

	if (event_manager_availability)
		return event_manager_availability;

	if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
		pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
		return -EINVAL;
	}

	if (!cb_fun)
		return -EFAULT;

	if (cb_type == PM_INIT_SUSPEND_CB) {
		ret = xlnx_remove_cb_for_suspend(cb_fun);
	} else {
		/* Remove the Node-Id/Event from the hash table */
		if (!xlnx_is_error_event(node_id)) {
			xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
		} else {
			for (pos = 0; pos < MAX_BITS; pos++) {
				eve = event & (1 << pos);
				if (!eve)
					continue;

				xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
			}
		}

		/* Un-register if the list is empty */
		if (is_need_to_unregister) {
			/* Un-register for the Node-Id/Event combination */
			ret = zynqmp_pm_register_notifier(node_id, event, false, false);
			if (ret) {
				pr_err("%s() failed for 0x%x and 0x%x: %d\n",
				       __func__, node_id, event, ret);
				return ret;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(xlnx_unregister_event);
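
/*
 * Continuing the illustrative sketch above: the same hypothetical client
 * would undo its registration with
 *
 *	xlnx_unregister_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
 *			      BIT(0), my_err_cb, NULL);
 */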

static void xlnx_call_suspend_cb_handler(const u32 *payload)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u32 cb_type = payload[0];
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Check for an existing entry in the hash table for the given cb_type */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
		if (eve_data->cb_type == cb_type) {
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
				is_callback_found = true;
			}
		}
	}
	if (!is_callback_found)
		pr_warn("Didn't find any registered callback for suspend event\n");
}

static void xlnx_call_notify_cb_handler(const u32 *payload)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Check for an existing entry in the hash table for the given key */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
				is_callback_found = true;
			}

			/* Re-register with the firmware to get future events */
			ret = zynqmp_pm_register_notifier(payload[1], payload[2],
							  eve_data->wake, true);
			if (ret) {
				pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
				       payload[1], payload[2], ret);
				list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
							 list) {
					/* Remove the already registered event from the hash table */
					xlnx_remove_cb_for_notify_event(payload[1], payload[2],
									cb_pos->eve_cb,
									cb_pos->agent_data);
				}
			}
		}
	}
	if (!is_callback_found)
		pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n",
			payload[1], payload[2]);
}

static void xlnx_get_event_callback_data(u32 *buf)
{
	zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
}

static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
{
	u32 cb_type, node_id, event, pos;
	u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
	u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};

	/* Get event data */
	xlnx_get_event_callback_data(payload);

	/* First element is the callback type, others are callback arguments */
	cb_type = payload[0];

	if (cb_type == PM_NOTIFY_CB) {
		node_id = payload[1];
		event = payload[2];
		if (!xlnx_is_error_event(node_id)) {
			xlnx_call_notify_cb_handler(payload);
		} else {
			/*
			 * Each callback function expects the payload as an input argument.
			 * A single callback can report multiple error events through the
			 * error mask, so payload[2] may contain multiple error events.
			 * The reg_driver_map database stores entries per single
			 * node_id/error combination. Therefore, copy the payload message
			 * into event_data, update event_data[2] with the error mask of a
			 * single error event, and use event_data as the input argument
			 * for the registered callback function.
			 */
			memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
			/* Support multiple error events */
			for (pos = 0; pos < MAX_BITS; pos++) {
				if (!(event & (1 << pos)))
					continue;
				event_data[2] = (event & (1 << pos));
				xlnx_call_notify_cb_handler(event_data);
			}
		}
	} else if (cb_type == PM_INIT_SUSPEND_CB) {
		xlnx_call_suspend_cb_handler(payload);
	} else {
		pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
	}

	return IRQ_HANDLED;
}
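
/*
 * Illustration (hypothetical values): if the firmware reports
 * payload = { PM_NOTIFY_CB, node, 0x6, ... }, the loop above invokes the
 * registered callbacks twice, once with event_data[2] = 0x2 and once with
 * event_data[2] = 0x4, so each handler sees exactly one error bit per call.
 */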

static int xlnx_event_cpuhp_start(unsigned int cpu)
{
	enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);

	return 0;
}

static int xlnx_event_cpuhp_down(unsigned int cpu)
{
	disable_percpu_irq(virq_sgi);

	return 0;
}

static void xlnx_disable_percpu_irq(void *data)
{
	disable_percpu_irq(virq_sgi);
}

static int xlnx_event_init_sgi(struct platform_device *pdev)
{
	int ret = 0;
	/*
	 * The IRQ-related structures below ensure that the SGI is mapped by
	 * the GIC IRQ domain and that the corresponding Linux IRQ for the
	 * hardware IRQ has a handler for interrupts received from the remote
	 * processor.
	 */
	struct irq_domain *domain;
	struct irq_fwspec sgi_fwspec;
	struct device_node *interrupt_parent = NULL;
	struct device *parent = pdev->dev.parent;

	/* Find the GIC controller to map SGIs. */
	interrupt_parent = of_irq_find_parent(parent->of_node);
	if (!interrupt_parent) {
		dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
		return -EINVAL;
	}

	/* Each SGI needs to be associated with the GIC's IRQ domain. */
	domain = irq_find_host(interrupt_parent);
	of_node_put(interrupt_parent);

	/* Each mapping needs the GIC domain when finding the IRQ mapping. */
	sgi_fwspec.fwnode = domain->fwnode;

	/*
	 * The fwspec uses a single cell, which carries the SGI number
	 * (set below).
	 */
	sgi_fwspec.param_count = 1;

	/* Set the SGI's hwirq */
	sgi_fwspec.param[0] = sgi_num;
	virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);

	ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
				 &dummy_cpu_number);

	WARN_ON(ret);
	if (ret) {
		irq_dispose_mapping(virq_sgi);
		return ret;
	}

	irq_to_desc(virq_sgi);
	irq_set_status_flags(virq_sgi, IRQ_PER_CPU);

	return ret;
}
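
/*
 * For illustration, with the default sgi_num of 15 the fwspec built above is
 * roughly { .fwnode = <GIC fwnode>, .param_count = 1, .param = { 15 } }, and
 * irq_create_fwspec_mapping() returns the Linux virq used as virq_sgi.
 */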

static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
{
	cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);

	on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);

	irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
	free_percpu_irq(virq_sgi, &dummy_cpu_number);
	irq_dispose_mapping(virq_sgi);
}

static int xlnx_event_manager_probe(struct platform_device *pdev)
{
	int ret;

	ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
	if (ret < 0) {
		dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
		return ret;
	}

	if ((ret & FIRMWARE_VERSION_MASK) <
	    REGISTER_NOTIFIER_FIRMWARE_VERSION) {
		dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
			REGISTER_NOTIFIER_FIRMWARE_VERSION,
			ret & FIRMWARE_VERSION_MASK);
		return -EOPNOTSUPP;
	}

	/* Initialize the SGI */
	ret = xlnx_event_init_sgi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "SGI init failed with %d\n", ret);
		return ret;
	}

	/* Set up callbacks for the CPU hot-plug cases */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
			  xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);

	ret = zynqmp_pm_register_sgi(sgi_num, 0);
	if (ret) {
		if (ret == -EOPNOTSUPP)
			dev_err(&pdev->dev, "SGI registration not supported by TF-A or Xen\n");
		else
			dev_err(&pdev->dev, "SGI %d registration failed, err %d\n", sgi_num, ret);

		xlnx_event_cleanup_sgi(pdev);
		return ret;
	}

	event_manager_availability = 0;

	dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
	dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");

	return ret;
}

static void xlnx_event_manager_remove(struct platform_device *pdev)
{
	int i;
	struct registered_event_data *eve_data;
	struct hlist_node *tmp;
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
		list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
			list_del_init(&cb_pos->list);
			kfree(cb_pos);
		}
		hash_del(&eve_data->hentry);
		kfree(eve_data);
	}

	ret = zynqmp_pm_register_sgi(0, 1);
	if (ret)
		dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);

	xlnx_event_cleanup_sgi(pdev);

	event_manager_availability = -EACCES;
}

static struct platform_driver xlnx_event_manager_driver = {
	.probe = xlnx_event_manager_probe,
	.remove = xlnx_event_manager_remove,
	.driver = {
		.name = "xlnx_event_manager",
	},
};
module_param(sgi_num, uint, 0);
module_platform_driver(xlnx_event_manager_driver);
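
/*
 * Note (illustrative, assuming the usual built-in parameter prefix derived
 * from this object's name): the SGI number above can be overridden at boot
 * via the kernel command line, e.g.
 *
 *	xlnx_event_manager.sgi_num=14
 */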