xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_events.c (revision 482f07775cf559c82cb3d086e3c4fad91582e4cb)
1f3a39818SAndrew Lewycky /*
2f3a39818SAndrew Lewycky  * Copyright 2014 Advanced Micro Devices, Inc.
3f3a39818SAndrew Lewycky  *
4f3a39818SAndrew Lewycky  * Permission is hereby granted, free of charge, to any person obtaining a
5f3a39818SAndrew Lewycky  * copy of this software and associated documentation files (the "Software"),
6f3a39818SAndrew Lewycky  * to deal in the Software without restriction, including without limitation
7f3a39818SAndrew Lewycky  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8f3a39818SAndrew Lewycky  * and/or sell copies of the Software, and to permit persons to whom the
9f3a39818SAndrew Lewycky  * Software is furnished to do so, subject to the following conditions:
10f3a39818SAndrew Lewycky  *
11f3a39818SAndrew Lewycky  * The above copyright notice and this permission notice shall be included in
12f3a39818SAndrew Lewycky  * all copies or substantial portions of the Software.
13f3a39818SAndrew Lewycky  *
14f3a39818SAndrew Lewycky  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15f3a39818SAndrew Lewycky  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16f3a39818SAndrew Lewycky  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17f3a39818SAndrew Lewycky  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18f3a39818SAndrew Lewycky  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19f3a39818SAndrew Lewycky  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20f3a39818SAndrew Lewycky  * OTHER DEALINGS IN THE SOFTWARE.
21f3a39818SAndrew Lewycky  */
22f3a39818SAndrew Lewycky 
23f3a39818SAndrew Lewycky #include <linux/mm_types.h>
24f3a39818SAndrew Lewycky #include <linux/slab.h>
25f3a39818SAndrew Lewycky #include <linux/types.h>
263f07c014SIngo Molnar #include <linux/sched/signal.h>
279b56bb11SFelix Kuehling #include <linux/sched/mm.h>
28f3a39818SAndrew Lewycky #include <linux/uaccess.h>
29f3a39818SAndrew Lewycky #include <linux/mman.h>
30f3a39818SAndrew Lewycky #include <linux/memory.h>
31f3a39818SAndrew Lewycky #include "kfd_priv.h"
32f3a39818SAndrew Lewycky #include "kfd_events.h"
3359d3e8beSAlexey Skidanov #include <linux/device.h>
34f3a39818SAndrew Lewycky 
35f3a39818SAndrew Lewycky /*
3674e40716SFelix Kuehling  * Wrapper around wait_queue_entry_t
37f3a39818SAndrew Lewycky  */
38f3a39818SAndrew Lewycky struct kfd_event_waiter {
3974e40716SFelix Kuehling 	wait_queue_entry_t wait;
4074e40716SFelix Kuehling 	struct kfd_event *event; /* Event to wait for */
4174e40716SFelix Kuehling 	bool activated;		 /* Becomes true when event is signaled */
42f3a39818SAndrew Lewycky };
43f3a39818SAndrew Lewycky 
44f3a39818SAndrew Lewycky /*
45f3a39818SAndrew Lewycky  * Each signal event needs a 64-bit signal slot where the signaler will write
46*482f0777SFelix Kuehling  * a 1 before sending an interrupt. (This is needed because some interrupts
47f3a39818SAndrew Lewycky  * do not contain enough spare data bits to identify an event.)
48*482f0777SFelix Kuehling  * We get whole pages and map them to the process VA.
49*482f0777SFelix Kuehling  * Individual signal events use their event_id as slot index.
50f3a39818SAndrew Lewycky  */
5150cb7dd9SFelix Kuehling struct kfd_signal_page {
52f3a39818SAndrew Lewycky 	uint64_t *kernel_address;
53f3a39818SAndrew Lewycky 	uint64_t __user *user_address;
54f3a39818SAndrew Lewycky };
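
/*
 * Illustrative sketch, not part of the driver: a signaler marks an
 * event by writing to its 64-bit slot before raising an interrupt
 * that carries the event ID. In reality the GPU performs the write;
 * this hypothetical helper only shows the order of operations.
 */
static inline void example_mark_slot_signaled(struct kfd_signal_page *page,
					      uint32_t event_id)
{
	/* Any value other than UNSIGNALED_EVENT_SLOT marks the event */
	WRITE_ONCE(page->kernel_address[event_id], 1);
	/* ...an interrupt carrying event_id (or its low bits) follows */
}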
55f3a39818SAndrew Lewycky 
56f3a39818SAndrew Lewycky /*
57f3a39818SAndrew Lewycky  * For signal events, the event ID is used as the interrupt user data.
58f3a39818SAndrew Lewycky  * For SQ s_sendmsg interrupts, this is limited to 8 bits.
59f3a39818SAndrew Lewycky  */
60f3a39818SAndrew Lewycky 
61f3a39818SAndrew Lewycky #define INTERRUPT_DATA_BITS 8
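
/*
 * Illustrative only: with 8 data bits an interrupt can carry at most
 * the low bits of an event ID. A hypothetical check of a partial ID
 * against a full event ID (assuming valid_id_bits < 32 so the shift
 * is well defined) would look like this:
 */
static inline bool example_partial_id_matches(uint32_t event_id,
					      uint32_t partial_id,
					      uint32_t valid_id_bits)
{
	uint32_t mask = (1U << valid_id_bits) - 1;

	return (event_id & mask) == (partial_id & mask);
}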
62f3a39818SAndrew Lewycky 
6350cb7dd9SFelix Kuehling static uint64_t *page_slots(struct kfd_signal_page *page)
64f3a39818SAndrew Lewycky {
65f3a39818SAndrew Lewycky 	return page->kernel_address;
66f3a39818SAndrew Lewycky }
67f3a39818SAndrew Lewycky 
6850cb7dd9SFelix Kuehling static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
69f3a39818SAndrew Lewycky {
70f3a39818SAndrew Lewycky 	void *backing_store;
7150cb7dd9SFelix Kuehling 	struct kfd_signal_page *page;
72f3a39818SAndrew Lewycky 
7350cb7dd9SFelix Kuehling 	page = kzalloc(sizeof(*page), GFP_KERNEL);
74f3a39818SAndrew Lewycky 	if (!page)
7550cb7dd9SFelix Kuehling 		return NULL;
76f3a39818SAndrew Lewycky 
7750cb7dd9SFelix Kuehling 	backing_store = (void *) __get_free_pages(GFP_KERNEL,
78f3a39818SAndrew Lewycky 					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
79f3a39818SAndrew Lewycky 	if (!backing_store)
80f3a39818SAndrew Lewycky 		goto fail_alloc_signal_store;
81f3a39818SAndrew Lewycky 
8250cb7dd9SFelix Kuehling 	/* Initialize all events to unsignaled */
83f3a39818SAndrew Lewycky 	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
84f3a39818SAndrew Lewycky 	       KFD_SIGNAL_EVENT_LIMIT * 8);
85f3a39818SAndrew Lewycky 
86f3a39818SAndrew Lewycky 	page->kernel_address = backing_store;
8779775b62SKent Russell 	pr_debug("Allocated new event signal page at %p for process %p\n",
88f3a39818SAndrew Lewycky 			page, p);
89f3a39818SAndrew Lewycky 
9050cb7dd9SFelix Kuehling 	return page;
91f3a39818SAndrew Lewycky 
92f3a39818SAndrew Lewycky fail_alloc_signal_store:
93f3a39818SAndrew Lewycky 	kfree(page);
9450cb7dd9SFelix Kuehling 	return NULL;
9550cb7dd9SFelix Kuehling }
9650cb7dd9SFelix Kuehling 
97*482f0777SFelix Kuehling static int allocate_event_notification_slot(struct kfd_process *p,
98*482f0777SFelix Kuehling 					    struct kfd_event *ev)
9950cb7dd9SFelix Kuehling {
100*482f0777SFelix Kuehling 	int id;
101*482f0777SFelix Kuehling 
10250cb7dd9SFelix Kuehling 	if (!p->signal_page) {
10350cb7dd9SFelix Kuehling 		p->signal_page = allocate_signal_page(p);
10450cb7dd9SFelix Kuehling 		if (!p->signal_page)
105*482f0777SFelix Kuehling 			return -ENOMEM;
106f3a39818SAndrew Lewycky 	}
107f3a39818SAndrew Lewycky 
108*482f0777SFelix Kuehling 	id = idr_alloc(&p->event_idr, ev, 0, KFD_SIGNAL_EVENT_LIMIT,
109*482f0777SFelix Kuehling 		       GFP_KERNEL);
110*482f0777SFelix Kuehling 	if (id < 0)
111*482f0777SFelix Kuehling 		return id;
112f3a39818SAndrew Lewycky 
113*482f0777SFelix Kuehling 	ev->event_id = id;
114*482f0777SFelix Kuehling 	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;
115f3a39818SAndrew Lewycky 
116*482f0777SFelix Kuehling 	return 0;
117f3a39818SAndrew Lewycky }
118f3a39818SAndrew Lewycky 
119f3a39818SAndrew Lewycky /*
120f3a39818SAndrew Lewycky  * Assumes that p->event_mutex is held and of course that p is not going
121f3a39818SAndrew Lewycky  * away (current or locked).
122f3a39818SAndrew Lewycky  */
123f3a39818SAndrew Lewycky static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
124f3a39818SAndrew Lewycky {
125*482f0777SFelix Kuehling 	return idr_find(&p->event_idr, id);
126f3a39818SAndrew Lewycky }
127f3a39818SAndrew Lewycky 
128f3a39818SAndrew Lewycky static int create_signal_event(struct file *devkfd,
129f3a39818SAndrew Lewycky 				struct kfd_process *p,
130f3a39818SAndrew Lewycky 				struct kfd_event *ev)
131f3a39818SAndrew Lewycky {
132*482f0777SFelix Kuehling 	int ret;
133*482f0777SFelix Kuehling 
134f3a39818SAndrew Lewycky 	if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
135c986169fSFelix Kuehling 		if (!p->signal_event_limit_reached) {
13679775b62SKent Russell 			pr_warn("Signal event wasn't created because the limit was reached\n");
137c986169fSFelix Kuehling 			p->signal_event_limit_reached = true;
138c986169fSFelix Kuehling 		}
139*482f0777SFelix Kuehling 		return -ENOSPC;
140f3a39818SAndrew Lewycky 	}
141f3a39818SAndrew Lewycky 
142*482f0777SFelix Kuehling 	ret = allocate_event_notification_slot(p, ev);
143*482f0777SFelix Kuehling 	if (ret) {
14479775b62SKent Russell 		pr_warn("Signal event wasn't created because the kernel is out of memory\n");
145*482f0777SFelix Kuehling 		return ret;
146f3a39818SAndrew Lewycky 	}
147f3a39818SAndrew Lewycky 
148f3a39818SAndrew Lewycky 	p->signal_event_count++;
149f3a39818SAndrew Lewycky 
150*482f0777SFelix Kuehling 	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
15179775b62SKent Russell 	pr_debug("Signal event number %zu created with id %u, address %p\n",
1526235e15eSOded Gabbay 			p->signal_event_count, ev->event_id,
1536235e15eSOded Gabbay 			ev->user_signal_address);
1546235e15eSOded Gabbay 
155f3a39818SAndrew Lewycky 	return 0;
156f3a39818SAndrew Lewycky }
157f3a39818SAndrew Lewycky 
158f3a39818SAndrew Lewycky static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
159f3a39818SAndrew Lewycky {
160*482f0777SFelix Kuehling 	/* Cast KFD_LAST_NONSIGNAL_EVENT_ID to uint32_t. This allows an
161*482f0777SFelix Kuehling 	 * intentional integer overflow to -1 without a compiler
162*482f0777SFelix Kuehling 	 * warning. idr_alloc treats a negative value as "maximum
163*482f0777SFelix Kuehling 	 * signed integer".
164*482f0777SFelix Kuehling 	 */
165*482f0777SFelix Kuehling 	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
166*482f0777SFelix Kuehling 			   (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
167*482f0777SFelix Kuehling 			   GFP_KERNEL);
168*482f0777SFelix Kuehling 
169*482f0777SFelix Kuehling 	if (id < 0)
170*482f0777SFelix Kuehling 		return id;
171*482f0777SFelix Kuehling 	ev->event_id = id;
172f3a39818SAndrew Lewycky 
173f3a39818SAndrew Lewycky 	return 0;
174f3a39818SAndrew Lewycky }
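
/*
 * Note on the ID space: signal events get IDs in
 * [0, KFD_SIGNAL_EVENT_LIMIT) so that the ID doubles as the signal
 * slot index, while all other events draw from
 * [KFD_FIRST_NONSIGNAL_EVENT_ID, KFD_LAST_NONSIGNAL_EVENT_ID], so the
 * two ranges never collide in p->event_idr.
 */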
175f3a39818SAndrew Lewycky 
176f3a39818SAndrew Lewycky void kfd_event_init_process(struct kfd_process *p)
177f3a39818SAndrew Lewycky {
178f3a39818SAndrew Lewycky 	mutex_init(&p->event_mutex);
179*482f0777SFelix Kuehling 	idr_init(&p->event_idr);
18050cb7dd9SFelix Kuehling 	p->signal_page = NULL;
181f3a39818SAndrew Lewycky 	p->signal_event_count = 0;
182f3a39818SAndrew Lewycky }
183f3a39818SAndrew Lewycky 
184f3a39818SAndrew Lewycky static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
185f3a39818SAndrew Lewycky {
18674e40716SFelix Kuehling 	struct kfd_event_waiter *waiter;
187fe528c13SFelix Kuehling 
18874e40716SFelix Kuehling 	/* Wake up pending waiters; they will see a NULL event and return failure */
18974e40716SFelix Kuehling 	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
190fe528c13SFelix Kuehling 		waiter->event = NULL;
19174e40716SFelix Kuehling 	wake_up_all(&ev->wq);
192fe528c13SFelix Kuehling 
193*482f0777SFelix Kuehling 	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
194*482f0777SFelix Kuehling 	    ev->type == KFD_EVENT_TYPE_DEBUG)
195f3a39818SAndrew Lewycky 		p->signal_event_count--;
196f3a39818SAndrew Lewycky 
197*482f0777SFelix Kuehling 	idr_remove(&p->event_idr, ev->event_id);
198f3a39818SAndrew Lewycky 	kfree(ev);
199f3a39818SAndrew Lewycky }
200f3a39818SAndrew Lewycky 
201f3a39818SAndrew Lewycky static void destroy_events(struct kfd_process *p)
202f3a39818SAndrew Lewycky {
203f3a39818SAndrew Lewycky 	struct kfd_event *ev;
204*482f0777SFelix Kuehling 	uint32_t id;
205f3a39818SAndrew Lewycky 
206*482f0777SFelix Kuehling 	idr_for_each_entry(&p->event_idr, ev, id)
207f3a39818SAndrew Lewycky 		destroy_event(p, ev);
208*482f0777SFelix Kuehling 	idr_destroy(&p->event_idr);
209f3a39818SAndrew Lewycky }
210f3a39818SAndrew Lewycky 
211f3a39818SAndrew Lewycky /*
212f3a39818SAndrew Lewycky  * We assume that the process is being destroyed and there is no need to
213f3a39818SAndrew Lewycky  * unmap the pages or keep bookkeeping data in order.
214f3a39818SAndrew Lewycky  */
21550cb7dd9SFelix Kuehling static void shutdown_signal_page(struct kfd_process *p)
216f3a39818SAndrew Lewycky {
21750cb7dd9SFelix Kuehling 	struct kfd_signal_page *page = p->signal_page;
218f3a39818SAndrew Lewycky 
21950cb7dd9SFelix Kuehling 	if (page) {
220f3a39818SAndrew Lewycky 		free_pages((unsigned long)page->kernel_address,
221f3a39818SAndrew Lewycky 				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
222f3a39818SAndrew Lewycky 		kfree(page);
223f3a39818SAndrew Lewycky 	}
224f3a39818SAndrew Lewycky }
225f3a39818SAndrew Lewycky 
226f3a39818SAndrew Lewycky void kfd_event_free_process(struct kfd_process *p)
227f3a39818SAndrew Lewycky {
228f3a39818SAndrew Lewycky 	destroy_events(p);
22950cb7dd9SFelix Kuehling 	shutdown_signal_page(p);
230f3a39818SAndrew Lewycky }
231f3a39818SAndrew Lewycky 
232f3a39818SAndrew Lewycky static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
233f3a39818SAndrew Lewycky {
234f3a39818SAndrew Lewycky 	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
235f3a39818SAndrew Lewycky 					ev->type == KFD_EVENT_TYPE_DEBUG;
236f3a39818SAndrew Lewycky }
237f3a39818SAndrew Lewycky 
238f3a39818SAndrew Lewycky static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
239f3a39818SAndrew Lewycky {
240f3a39818SAndrew Lewycky 	return ev->type == KFD_EVENT_TYPE_SIGNAL;
241f3a39818SAndrew Lewycky }
242f3a39818SAndrew Lewycky 
243f3a39818SAndrew Lewycky int kfd_event_create(struct file *devkfd, struct kfd_process *p,
244f3a39818SAndrew Lewycky 		     uint32_t event_type, bool auto_reset, uint32_t node_id,
245f3a39818SAndrew Lewycky 		     uint32_t *event_id, uint32_t *event_trigger_data,
246f3a39818SAndrew Lewycky 		     uint64_t *event_page_offset, uint32_t *event_slot_index)
247f3a39818SAndrew Lewycky {
248f3a39818SAndrew Lewycky 	int ret = 0;
249f3a39818SAndrew Lewycky 	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
250f3a39818SAndrew Lewycky 
251f3a39818SAndrew Lewycky 	if (!ev)
252f3a39818SAndrew Lewycky 		return -ENOMEM;
253f3a39818SAndrew Lewycky 
254f3a39818SAndrew Lewycky 	ev->type = event_type;
255f3a39818SAndrew Lewycky 	ev->auto_reset = auto_reset;
256f3a39818SAndrew Lewycky 	ev->signaled = false;
257f3a39818SAndrew Lewycky 
25874e40716SFelix Kuehling 	init_waitqueue_head(&ev->wq);
259f3a39818SAndrew Lewycky 
260f3a39818SAndrew Lewycky 	*event_page_offset = 0;
261f3a39818SAndrew Lewycky 
262f3a39818SAndrew Lewycky 	mutex_lock(&p->event_mutex);
263f3a39818SAndrew Lewycky 
264f3a39818SAndrew Lewycky 	switch (event_type) {
265f3a39818SAndrew Lewycky 	case KFD_EVENT_TYPE_SIGNAL:
266f3a39818SAndrew Lewycky 	case KFD_EVENT_TYPE_DEBUG:
267f3a39818SAndrew Lewycky 		ret = create_signal_event(devkfd, p, ev);
268f3a39818SAndrew Lewycky 		if (!ret) {
26950cb7dd9SFelix Kuehling 			*event_page_offset = KFD_MMAP_EVENTS_MASK;
270f3a39818SAndrew Lewycky 			*event_page_offset <<= PAGE_SHIFT;
271*482f0777SFelix Kuehling 			*event_slot_index = ev->event_id;
272f3a39818SAndrew Lewycky 		}
273f3a39818SAndrew Lewycky 		break;
274f3a39818SAndrew Lewycky 	default:
275f3a39818SAndrew Lewycky 		ret = create_other_event(p, ev);
276f3a39818SAndrew Lewycky 		break;
277f3a39818SAndrew Lewycky 	}
278f3a39818SAndrew Lewycky 
279f3a39818SAndrew Lewycky 	if (!ret) {
280f3a39818SAndrew Lewycky 		*event_id = ev->event_id;
281f3a39818SAndrew Lewycky 		*event_trigger_data = ev->event_id;
282f3a39818SAndrew Lewycky 	} else {
283f3a39818SAndrew Lewycky 		kfree(ev);
284f3a39818SAndrew Lewycky 	}
285f3a39818SAndrew Lewycky 
286f3a39818SAndrew Lewycky 	mutex_unlock(&p->event_mutex);
287f3a39818SAndrew Lewycky 
288f3a39818SAndrew Lewycky 	return ret;
289f3a39818SAndrew Lewycky }
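
/*
 * Hypothetical userspace sketch, not part of this file: the flow this
 * ioctl backs is to create a signal event on /dev/kfd and mmap the
 * signal page at the returned offset so the process can inspect its
 * slot. The helper name and signal_page_size parameter are
 * assumptions; the size must equal the driver's allocation of
 * KFD_SIGNAL_EVENT_LIMIT * 8 bytes.
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/kfd_ioctl.h>
 *
 *	static volatile uint64_t *
 *	example_create_and_map_event(int kfd_fd, size_t signal_page_size,
 *				     uint32_t *event_id)
 *	{
 *		struct kfd_ioctl_create_event_args args = {
 *			.event_type = KFD_IOC_EVENT_SIGNAL,
 *			.auto_reset = 1,
 *		};
 *		void *page;
 *
 *		if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &args) == -1)
 *			return NULL;
 *
 *		page = mmap(NULL, signal_page_size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, kfd_fd, args.event_page_offset);
 *		if (page == MAP_FAILED)
 *			return NULL;
 *
 *		*event_id = args.event_id;
 *		return (volatile uint64_t *)page + args.event_slot_index;
 *	}
 */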
290f3a39818SAndrew Lewycky 
291f3a39818SAndrew Lewycky /* Assumes that p is current. */
292f3a39818SAndrew Lewycky int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
293f3a39818SAndrew Lewycky {
294f3a39818SAndrew Lewycky 	struct kfd_event *ev;
295f3a39818SAndrew Lewycky 	int ret = 0;
296f3a39818SAndrew Lewycky 
297f3a39818SAndrew Lewycky 	mutex_lock(&p->event_mutex);
298f3a39818SAndrew Lewycky 
299f3a39818SAndrew Lewycky 	ev = lookup_event_by_id(p, event_id);
300f3a39818SAndrew Lewycky 
301f3a39818SAndrew Lewycky 	if (ev)
302f3a39818SAndrew Lewycky 		destroy_event(p, ev);
303f3a39818SAndrew Lewycky 	else
304f3a39818SAndrew Lewycky 		ret = -EINVAL;
305f3a39818SAndrew Lewycky 
306f3a39818SAndrew Lewycky 	mutex_unlock(&p->event_mutex);
307f3a39818SAndrew Lewycky 	return ret;
308f3a39818SAndrew Lewycky }
309f3a39818SAndrew Lewycky 
310f3a39818SAndrew Lewycky static void set_event(struct kfd_event *ev)
311f3a39818SAndrew Lewycky {
312f3a39818SAndrew Lewycky 	struct kfd_event_waiter *waiter;
313f3a39818SAndrew Lewycky 
31474e40716SFelix Kuehling 	/* Auto reset if the list is non-empty and we're waking
31574e40716SFelix Kuehling 	 * someone. waitqueue_active is safe here because we're
31674e40716SFelix Kuehling 	 * protected by the p->event_mutex, which is also held when
31774e40716SFelix Kuehling 	 * updating the wait queues in kfd_wait_on_events.
31874e40716SFelix Kuehling 	 */
31974e40716SFelix Kuehling 	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
320f3a39818SAndrew Lewycky 
32174e40716SFelix Kuehling 	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
322f3a39818SAndrew Lewycky 		waiter->activated = true;
323f3a39818SAndrew Lewycky 
32474e40716SFelix Kuehling 	wake_up_all(&ev->wq);
325f3a39818SAndrew Lewycky }
326f3a39818SAndrew Lewycky 
327f3a39818SAndrew Lewycky /* Assumes that p is current. */
328f3a39818SAndrew Lewycky int kfd_set_event(struct kfd_process *p, uint32_t event_id)
329f3a39818SAndrew Lewycky {
330f3a39818SAndrew Lewycky 	int ret = 0;
331f3a39818SAndrew Lewycky 	struct kfd_event *ev;
332f3a39818SAndrew Lewycky 
333f3a39818SAndrew Lewycky 	mutex_lock(&p->event_mutex);
334f3a39818SAndrew Lewycky 
335f3a39818SAndrew Lewycky 	ev = lookup_event_by_id(p, event_id);
336f3a39818SAndrew Lewycky 
337f3a39818SAndrew Lewycky 	if (ev && event_can_be_cpu_signaled(ev))
338f3a39818SAndrew Lewycky 		set_event(ev);
339f3a39818SAndrew Lewycky 	else
340f3a39818SAndrew Lewycky 		ret = -EINVAL;
341f3a39818SAndrew Lewycky 
342f3a39818SAndrew Lewycky 	mutex_unlock(&p->event_mutex);
343f3a39818SAndrew Lewycky 	return ret;
344f3a39818SAndrew Lewycky }
345f3a39818SAndrew Lewycky 
346f3a39818SAndrew Lewycky static void reset_event(struct kfd_event *ev)
347f3a39818SAndrew Lewycky {
348f3a39818SAndrew Lewycky 	ev->signaled = false;
349f3a39818SAndrew Lewycky }
350f3a39818SAndrew Lewycky 
351f3a39818SAndrew Lewycky /* Assumes that p is current. */
352f3a39818SAndrew Lewycky int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
353f3a39818SAndrew Lewycky {
354f3a39818SAndrew Lewycky 	int ret = 0;
355f3a39818SAndrew Lewycky 	struct kfd_event *ev;
356f3a39818SAndrew Lewycky 
357f3a39818SAndrew Lewycky 	mutex_lock(&p->event_mutex);
358f3a39818SAndrew Lewycky 
359f3a39818SAndrew Lewycky 	ev = lookup_event_by_id(p, event_id);
360f3a39818SAndrew Lewycky 
361f3a39818SAndrew Lewycky 	if (ev && event_can_be_cpu_signaled(ev))
362f3a39818SAndrew Lewycky 		reset_event(ev);
363f3a39818SAndrew Lewycky 	else
364f3a39818SAndrew Lewycky 		ret = -EINVAL;
365f3a39818SAndrew Lewycky 
366f3a39818SAndrew Lewycky 	mutex_unlock(&p->event_mutex);
367f3a39818SAndrew Lewycky 	return ret;
369f3a39818SAndrew Lewycky }
370f3a39818SAndrew Lewycky 
371f3a39818SAndrew Lewycky static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
372f3a39818SAndrew Lewycky {
373*482f0777SFelix Kuehling 	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
374f3a39818SAndrew Lewycky }
375f3a39818SAndrew Lewycky 
376f3a39818SAndrew Lewycky static void set_event_from_interrupt(struct kfd_process *p,
377f3a39818SAndrew Lewycky 					struct kfd_event *ev)
378f3a39818SAndrew Lewycky {
379f3a39818SAndrew Lewycky 	if (ev && event_can_be_gpu_signaled(ev)) {
380f3a39818SAndrew Lewycky 		acknowledge_signal(p, ev);
381f3a39818SAndrew Lewycky 		set_event(ev);
382f3a39818SAndrew Lewycky 	}
383f3a39818SAndrew Lewycky }
384f3a39818SAndrew Lewycky 
385f3a39818SAndrew Lewycky void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
386f3a39818SAndrew Lewycky 				uint32_t valid_id_bits)
387f3a39818SAndrew Lewycky {
388f3a39818SAndrew Lewycky 	struct kfd_event *ev;
389f3a39818SAndrew Lewycky 
390f3a39818SAndrew Lewycky 	/*
391f3a39818SAndrew Lewycky 	 * Because we are called from arbitrary context (workqueue) as opposed
392f3a39818SAndrew Lewycky 	 * to process context, kfd_process could attempt to exit while we are
393f3a39818SAndrew Lewycky 	 * running so the lookup function returns a locked process.
394f3a39818SAndrew Lewycky 	 */
395f3a39818SAndrew Lewycky 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
396f3a39818SAndrew Lewycky 
397f3a39818SAndrew Lewycky 	if (!p)
398f3a39818SAndrew Lewycky 		return; /* Presumably process exited. */
399f3a39818SAndrew Lewycky 
400f3a39818SAndrew Lewycky 	mutex_lock(&p->event_mutex);
401f3a39818SAndrew Lewycky 
402f3a39818SAndrew Lewycky 	if (valid_id_bits >= INTERRUPT_DATA_BITS) {
403f3a39818SAndrew Lewycky 		/* Partial ID is a full ID. */
404f3a39818SAndrew Lewycky 		ev = lookup_event_by_id(p, partial_id);
405f3a39818SAndrew Lewycky 		set_event_from_interrupt(p, ev);
40650cb7dd9SFelix Kuehling 	} else if (p->signal_page) {
407f3a39818SAndrew Lewycky 		/*
408f3a39818SAndrew Lewycky 		 * Partial ID is in fact partial. For now we completely
409f3a39818SAndrew Lewycky 		 * ignore it, but we could use any bits we did receive to
410f3a39818SAndrew Lewycky 		 * search faster.
411f3a39818SAndrew Lewycky 		 */
412*482f0777SFelix Kuehling 		uint64_t *slots = page_slots(p->signal_page);
413*482f0777SFelix Kuehling 		uint32_t id;
414f3a39818SAndrew Lewycky 
415*482f0777SFelix Kuehling 		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) {
416*482f0777SFelix Kuehling 			/* With relatively few events, it's faster to
417*482f0777SFelix Kuehling 			 * iterate over the event IDR
418*482f0777SFelix Kuehling 			 */
419*482f0777SFelix Kuehling 			idr_for_each_entry(&p->event_idr, ev, id) {
420*482f0777SFelix Kuehling 				if (id >= KFD_SIGNAL_EVENT_LIMIT)
421*482f0777SFelix Kuehling 					break;
422*482f0777SFelix Kuehling 
423*482f0777SFelix Kuehling 				if (slots[id] != UNSIGNALED_EVENT_SLOT)
424f3a39818SAndrew Lewycky 					set_event_from_interrupt(p, ev);
425f3a39818SAndrew Lewycky 			}
426*482f0777SFelix Kuehling 		} else {
427*482f0777SFelix Kuehling 			/* With relatively many events, it's faster to
428*482f0777SFelix Kuehling 			 * iterate over the signal slots and lookup
429*482f0777SFelix Kuehling 			 * only signaled events from the IDR.
430*482f0777SFelix Kuehling 			 */
431*482f0777SFelix Kuehling 			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
432*482f0777SFelix Kuehling 				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
433*482f0777SFelix Kuehling 					ev = lookup_event_by_id(p, id);
434*482f0777SFelix Kuehling 					set_event_from_interrupt(p, ev);
435*482f0777SFelix Kuehling 				}
436*482f0777SFelix Kuehling 		}
437f3a39818SAndrew Lewycky 	}
438f3a39818SAndrew Lewycky 
439f3a39818SAndrew Lewycky 	mutex_unlock(&p->event_mutex);
440f3a39818SAndrew Lewycky 	mutex_unlock(&p->mutex);
441f3a39818SAndrew Lewycky }
442f3a39818SAndrew Lewycky 
443f3a39818SAndrew Lewycky static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
444f3a39818SAndrew Lewycky {
445f3a39818SAndrew Lewycky 	struct kfd_event_waiter *event_waiters;
446f3a39818SAndrew Lewycky 	uint32_t i;
447f3a39818SAndrew Lewycky 
448f3a39818SAndrew Lewycky 	event_waiters = kmalloc_array(num_events,
449f3a39818SAndrew Lewycky 					sizeof(struct kfd_event_waiter),
450f3a39818SAndrew Lewycky 					GFP_KERNEL);
451f3a39818SAndrew Lewycky 
452f3a39818SAndrew Lewycky 	for (i = 0; event_waiters && (i < num_events); i++) {
45374e40716SFelix Kuehling 		init_wait(&event_waiters[i].wait);
454f3a39818SAndrew Lewycky 		event_waiters[i].activated = false;
455f3a39818SAndrew Lewycky 	}
456f3a39818SAndrew Lewycky 
457f3a39818SAndrew Lewycky 	return event_waiters;
458f3a39818SAndrew Lewycky }
459f3a39818SAndrew Lewycky 
4601f9d09beSSean Keely static int init_event_waiter_get_status(struct kfd_process *p,
461f3a39818SAndrew Lewycky 		struct kfd_event_waiter *waiter,
462ebf947feSFelix Kuehling 		uint32_t event_id)
463f3a39818SAndrew Lewycky {
464f3a39818SAndrew Lewycky 	struct kfd_event *ev = lookup_event_by_id(p, event_id);
465f3a39818SAndrew Lewycky 
466f3a39818SAndrew Lewycky 	if (!ev)
467f3a39818SAndrew Lewycky 		return -EINVAL;
468f3a39818SAndrew Lewycky 
46959d3e8beSAlexey Skidanov 	waiter->event = ev;
470f3a39818SAndrew Lewycky 	waiter->activated = ev->signaled;
471f3a39818SAndrew Lewycky 	ev->signaled = ev->signaled && !ev->auto_reset;
472f3a39818SAndrew Lewycky 
473f3a39818SAndrew Lewycky 	return 0;
474f3a39818SAndrew Lewycky }
475f3a39818SAndrew Lewycky 
4761f9d09beSSean Keely static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
4771f9d09beSSean Keely {
4781f9d09beSSean Keely 	struct kfd_event *ev = waiter->event;
4791f9d09beSSean Keely 
4801f9d09beSSean Keely 	/* Only add to the wait list if we actually need to
4811f9d09beSSean Keely 	 * wait on this event.
4821f9d09beSSean Keely 	 */
4831f9d09beSSean Keely 	if (!waiter->activated)
48474e40716SFelix Kuehling 		add_wait_queue(&ev->wq, &waiter->wait);
4851f9d09beSSean Keely }
4861f9d09beSSean Keely 
487fe528c13SFelix Kuehling /* test_event_condition - Test condition of events being waited for
488fe528c13SFelix Kuehling  * @all:           Return completion only if all events have signaled
489fe528c13SFelix Kuehling  * @num_events:    Number of events to wait for
490fe528c13SFelix Kuehling  * @event_waiters: Array of event waiters, one per event
491fe528c13SFelix Kuehling  *
492fe528c13SFelix Kuehling  * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
493fe528c13SFelix Kuehling  * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
494fe528c13SFelix Kuehling  * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
495fe528c13SFelix Kuehling  * the events have been destroyed.
496fe528c13SFelix Kuehling  */
497fe528c13SFelix Kuehling static uint32_t test_event_condition(bool all, uint32_t num_events,
498f3a39818SAndrew Lewycky 				struct kfd_event_waiter *event_waiters)
499f3a39818SAndrew Lewycky {
500f3a39818SAndrew Lewycky 	uint32_t i;
501f3a39818SAndrew Lewycky 	uint32_t activated_count = 0;
502f3a39818SAndrew Lewycky 
503f3a39818SAndrew Lewycky 	for (i = 0; i < num_events; i++) {
504fe528c13SFelix Kuehling 		if (!event_waiters[i].event)
505fe528c13SFelix Kuehling 			return KFD_IOC_WAIT_RESULT_FAIL;
506fe528c13SFelix Kuehling 
507f3a39818SAndrew Lewycky 		if (event_waiters[i].activated) {
508f3a39818SAndrew Lewycky 			if (!all)
509fe528c13SFelix Kuehling 				return KFD_IOC_WAIT_RESULT_COMPLETE;
510f3a39818SAndrew Lewycky 
511f3a39818SAndrew Lewycky 			activated_count++;
512f3a39818SAndrew Lewycky 		}
513f3a39818SAndrew Lewycky 	}
514f3a39818SAndrew Lewycky 
515fe528c13SFelix Kuehling 	return activated_count == num_events ?
516fe528c13SFelix Kuehling 		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
517f3a39818SAndrew Lewycky }
518f3a39818SAndrew Lewycky 
51959d3e8beSAlexey Skidanov /*
52059d3e8beSAlexey Skidanov  * Copy event specific data, if defined.
52159d3e8beSAlexey Skidanov  * Currently only memory exception events have additional data to copy to user
52259d3e8beSAlexey Skidanov  */
523fdf0c833SFelix Kuehling static int copy_signaled_event_data(uint32_t num_events,
52459d3e8beSAlexey Skidanov 		struct kfd_event_waiter *event_waiters,
52559d3e8beSAlexey Skidanov 		struct kfd_event_data __user *data)
52659d3e8beSAlexey Skidanov {
52759d3e8beSAlexey Skidanov 	struct kfd_hsa_memory_exception_data *src;
52859d3e8beSAlexey Skidanov 	struct kfd_hsa_memory_exception_data __user *dst;
52959d3e8beSAlexey Skidanov 	struct kfd_event_waiter *waiter;
53059d3e8beSAlexey Skidanov 	struct kfd_event *event;
53159d3e8beSAlexey Skidanov 	uint32_t i;
53259d3e8beSAlexey Skidanov 
53359d3e8beSAlexey Skidanov 	for (i = 0; i < num_events; i++) {
53459d3e8beSAlexey Skidanov 		waiter = &event_waiters[i];
53559d3e8beSAlexey Skidanov 		event = waiter->event;
53659d3e8beSAlexey Skidanov 		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
537ebf947feSFelix Kuehling 			dst = &data[i].memory_exception_data;
53859d3e8beSAlexey Skidanov 			src = &event->memory_exception_data;
53959d3e8beSAlexey Skidanov 			if (copy_to_user(dst, src,
54059d3e8beSAlexey Skidanov 				sizeof(struct kfd_hsa_memory_exception_data)))
541fdf0c833SFelix Kuehling 				return -EFAULT;
54259d3e8beSAlexey Skidanov 		}
54359d3e8beSAlexey Skidanov 	}
54459d3e8beSAlexey Skidanov 
545fdf0c833SFelix Kuehling 	return 0;
54759d3e8beSAlexey Skidanov }
54859d3e8beSAlexey Skidanov 
551f3a39818SAndrew Lewycky static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
552f3a39818SAndrew Lewycky {
553f3a39818SAndrew Lewycky 	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
554f3a39818SAndrew Lewycky 		return 0;
555f3a39818SAndrew Lewycky 
556f3a39818SAndrew Lewycky 	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
557f3a39818SAndrew Lewycky 		return MAX_SCHEDULE_TIMEOUT;
558f3a39818SAndrew Lewycky 
559f3a39818SAndrew Lewycky 	/*
560f3a39818SAndrew Lewycky 	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
561f3a39818SAndrew Lewycky 	 * but we consider them finite.
562f3a39818SAndrew Lewycky 	 * This hack is wrong, but nobody is likely to notice.
563f3a39818SAndrew Lewycky 	 */
564f3a39818SAndrew Lewycky 	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);
565f3a39818SAndrew Lewycky 
566f3a39818SAndrew Lewycky 	return msecs_to_jiffies(user_timeout_ms) + 1;
567f3a39818SAndrew Lewycky }
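
/*
 * Worked example: a requested timeout of 1 ms becomes
 * msecs_to_jiffies(1) + 1 jiffies; the extra jiffy presumably rounds
 * up so the wait is never shorter than requested when it starts just
 * before a tick boundary.
 */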
568f3a39818SAndrew Lewycky 
569f3a39818SAndrew Lewycky static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
570f3a39818SAndrew Lewycky {
571f3a39818SAndrew Lewycky 	uint32_t i;
572f3a39818SAndrew Lewycky 
573f3a39818SAndrew Lewycky 	for (i = 0; i < num_events; i++)
57474e40716SFelix Kuehling 		if (waiters[i].event)
57574e40716SFelix Kuehling 			remove_wait_queue(&waiters[i].event->wq,
57674e40716SFelix Kuehling 					  &waiters[i].wait);
577f3a39818SAndrew Lewycky 
578f3a39818SAndrew Lewycky 	kfree(waiters);
579f3a39818SAndrew Lewycky }
580f3a39818SAndrew Lewycky 
581f3a39818SAndrew Lewycky int kfd_wait_on_events(struct kfd_process *p,
58259d3e8beSAlexey Skidanov 		       uint32_t num_events, void __user *data,
583f3a39818SAndrew Lewycky 		       bool all, uint32_t user_timeout_ms,
584fdf0c833SFelix Kuehling 		       uint32_t *wait_result)
585f3a39818SAndrew Lewycky {
58659d3e8beSAlexey Skidanov 	struct kfd_event_data __user *events =
58759d3e8beSAlexey Skidanov 			(struct kfd_event_data __user *) data;
588f3a39818SAndrew Lewycky 	uint32_t i;
589f3a39818SAndrew Lewycky 	int ret = 0;
591f3a39818SAndrew Lewycky 	struct kfd_event_waiter *event_waiters = NULL;
592f3a39818SAndrew Lewycky 	long timeout = user_timeout_to_jiffies(user_timeout_ms);
593f3a39818SAndrew Lewycky 
594fdf0c833SFelix Kuehling 	event_waiters = alloc_event_waiters(num_events);
595fdf0c833SFelix Kuehling 	if (!event_waiters) {
596fdf0c833SFelix Kuehling 		ret = -ENOMEM;
597fdf0c833SFelix Kuehling 		goto out;
598fdf0c833SFelix Kuehling 	}
599fdf0c833SFelix Kuehling 
600f3a39818SAndrew Lewycky 	mutex_lock(&p->event_mutex);
601f3a39818SAndrew Lewycky 
602f3a39818SAndrew Lewycky 	for (i = 0; i < num_events; i++) {
60359d3e8beSAlexey Skidanov 		struct kfd_event_data event_data;
604f3a39818SAndrew Lewycky 
60559d3e8beSAlexey Skidanov 		if (copy_from_user(&event_data, &events[i],
6068bf79388SPan Bian 				sizeof(struct kfd_event_data))) {
6078bf79388SPan Bian 			ret = -EFAULT;
608fdf0c833SFelix Kuehling 			goto out_unlock;
6098bf79388SPan Bian 		}
610f3a39818SAndrew Lewycky 
6111f9d09beSSean Keely 		ret = init_event_waiter_get_status(p, &event_waiters[i],
612ebf947feSFelix Kuehling 				event_data.event_id);
613f3a39818SAndrew Lewycky 		if (ret)
614fdf0c833SFelix Kuehling 			goto out_unlock;
615f3a39818SAndrew Lewycky 	}
616f3a39818SAndrew Lewycky 
6171f9d09beSSean Keely 	/* Check condition once. */
618fe528c13SFelix Kuehling 	*wait_result = test_event_condition(all, num_events, event_waiters);
619fe528c13SFelix Kuehling 	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
620fdf0c833SFelix Kuehling 		ret = copy_signaled_event_data(num_events,
621fdf0c833SFelix Kuehling 					       event_waiters, events);
622fdf0c833SFelix Kuehling 		goto out_unlock;
623fe528c13SFelix Kuehling 	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
624fe528c13SFelix Kuehling 		/* This should not happen. Events shouldn't be
625fe528c13SFelix Kuehling 		 * destroyed while we're holding the event_mutex
626fe528c13SFelix Kuehling 		 */
627fe528c13SFelix Kuehling 		goto out_unlock;
628fe528c13SFelix Kuehling 	}
629fe528c13SFelix Kuehling 
6301f9d09beSSean Keely 	/* Add to wait lists if we need to wait. */
6311f9d09beSSean Keely 	for (i = 0; i < num_events; i++)
6321f9d09beSSean Keely 		init_event_waiter_add_to_waitlist(&event_waiters[i]);
6331f9d09beSSean Keely 
634f3a39818SAndrew Lewycky 	mutex_unlock(&p->event_mutex);
635f3a39818SAndrew Lewycky 
636f3a39818SAndrew Lewycky 	while (true) {
637f3a39818SAndrew Lewycky 		if (fatal_signal_pending(current)) {
638f3a39818SAndrew Lewycky 			ret = -EINTR;
639f3a39818SAndrew Lewycky 			break;
640f3a39818SAndrew Lewycky 		}
641f3a39818SAndrew Lewycky 
642f3a39818SAndrew Lewycky 		if (signal_pending(current)) {
643f3a39818SAndrew Lewycky 			/*
644f3a39818SAndrew Lewycky 			 * This is wrong when a nonzero, non-infinite timeout
645f3a39818SAndrew Lewycky 			 * is specified. We need to use
646f3a39818SAndrew Lewycky 			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
647f3a39818SAndrew Lewycky 			 * contains a union with data for each user and it's
648f3a39818SAndrew Lewycky 			 * in generic kernel code that I don't want to
649f3a39818SAndrew Lewycky 			 * touch yet.
650f3a39818SAndrew Lewycky 			 */
651f3a39818SAndrew Lewycky 			ret = -ERESTARTSYS;
652f3a39818SAndrew Lewycky 			break;
653f3a39818SAndrew Lewycky 		}
654f3a39818SAndrew Lewycky 
655d9aeec4cSSean Keely 		/* Set task state to interruptible sleep before
656d9aeec4cSSean Keely 		 * checking wake-up conditions. A concurrent wake-up
657d9aeec4cSSean Keely 		 * will put the task back into runnable state. In that
658d9aeec4cSSean Keely 		 * case schedule_timeout will not put the task to
659d9aeec4cSSean Keely 		 * sleep and we'll get a chance to re-check the
660d9aeec4cSSean Keely 		 * updated conditions almost immediately. Otherwise,
661d9aeec4cSSean Keely 		 * this race condition would lead to a soft hang or a
662d9aeec4cSSean Keely 		 * very long sleep.
663d9aeec4cSSean Keely 		 */
664d9aeec4cSSean Keely 		set_current_state(TASK_INTERRUPTIBLE);
665d9aeec4cSSean Keely 
666fe528c13SFelix Kuehling 		*wait_result = test_event_condition(all, num_events,
667fe528c13SFelix Kuehling 						    event_waiters);
668fe528c13SFelix Kuehling 		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
669f3a39818SAndrew Lewycky 			break;
670f3a39818SAndrew Lewycky 
671fe528c13SFelix Kuehling 		if (timeout <= 0)
672f3a39818SAndrew Lewycky 			break;
673f3a39818SAndrew Lewycky 
674d9aeec4cSSean Keely 		timeout = schedule_timeout(timeout);
675f3a39818SAndrew Lewycky 	}
676f3a39818SAndrew Lewycky 	__set_current_state(TASK_RUNNING);
677f3a39818SAndrew Lewycky 
678fdf0c833SFelix Kuehling 	/* copy_signaled_event_data may sleep. So this has to happen
679fdf0c833SFelix Kuehling 	 * after the task state is set back to RUNNING.
680fdf0c833SFelix Kuehling 	 */
681fdf0c833SFelix Kuehling 	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
682fdf0c833SFelix Kuehling 		ret = copy_signaled_event_data(num_events,
683fdf0c833SFelix Kuehling 					       event_waiters, events);
684fdf0c833SFelix Kuehling 
685f3a39818SAndrew Lewycky 	mutex_lock(&p->event_mutex);
686fdf0c833SFelix Kuehling out_unlock:
687f3a39818SAndrew Lewycky 	free_waiters(num_events, event_waiters);
688f3a39818SAndrew Lewycky 	mutex_unlock(&p->event_mutex);
689fdf0c833SFelix Kuehling out:
690fdf0c833SFelix Kuehling 	if (ret)
691fdf0c833SFelix Kuehling 		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
692fe528c13SFelix Kuehling 	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
693fe528c13SFelix Kuehling 		ret = -EIO;
694f3a39818SAndrew Lewycky 
695f3a39818SAndrew Lewycky 	return ret;
696f3a39818SAndrew Lewycky }
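
/*
 * Hypothetical userspace counterpart, not part of this file: waiting
 * on a single event through AMDKFD_IOC_WAIT_EVENTS. The helper name
 * is an assumption; the ioctl, argument struct and
 * KFD_IOC_WAIT_RESULT_* codes come from the KFD uapi header.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kfd_ioctl.h>
 *
 *	static int example_wait_on_event(int kfd_fd, uint32_t event_id,
 *					 uint32_t timeout_ms)
 *	{
 *		struct kfd_event_data data = { .event_id = event_id };
 *		struct kfd_ioctl_wait_events_args args = {
 *			.events_ptr = (uintptr_t)&data,
 *			.num_events = 1,
 *			.wait_for_all = 1,
 *			.timeout = timeout_ms,
 *		};
 *
 *		if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &args) == -1)
 *			return -1;	// interrupted or bad event ID
 *
 *		return args.wait_result;	// KFD_IOC_WAIT_RESULT_*
 *	}
 */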
697f3a39818SAndrew Lewycky 
698f3a39818SAndrew Lewycky int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
699f3a39818SAndrew Lewycky {
701f3a39818SAndrew Lewycky 	unsigned long pfn;
70250cb7dd9SFelix Kuehling 	struct kfd_signal_page *page;
703f3a39818SAndrew Lewycky 
704f3a39818SAndrew Lewycky 	/* Check that the requested size matches the signal page size */
705f3a39818SAndrew Lewycky 	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
706f3a39818SAndrew Lewycky 			get_order(vma->vm_end - vma->vm_start)) {
70779775b62SKent Russell 		pr_err("Event page mmap requested illegal size\n");
708f3a39818SAndrew Lewycky 		return -EINVAL;
709f3a39818SAndrew Lewycky 	}
710f3a39818SAndrew Lewycky 
71150cb7dd9SFelix Kuehling 	page = p->signal_page;
712f3a39818SAndrew Lewycky 	if (!page) {
713f3a39818SAndrew Lewycky 		/* Probably KFD bug, but mmap is user-accessible. */
71450cb7dd9SFelix Kuehling 		pr_debug("Signal page could not be found\n");
715f3a39818SAndrew Lewycky 		return -EINVAL;
716f3a39818SAndrew Lewycky 	}
717f3a39818SAndrew Lewycky 
718f3a39818SAndrew Lewycky 	pfn = __pa(page->kernel_address);
719f3a39818SAndrew Lewycky 	pfn >>= PAGE_SHIFT;
720f3a39818SAndrew Lewycky 
721f3a39818SAndrew Lewycky 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
722f3a39818SAndrew Lewycky 		       | VM_DONTDUMP | VM_PFNMAP;
723f3a39818SAndrew Lewycky 
72479775b62SKent Russell 	pr_debug("Mapping signal page\n");
725f3a39818SAndrew Lewycky 	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
726f3a39818SAndrew Lewycky 	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
727f3a39818SAndrew Lewycky 	pr_debug("     pfn                 == 0x%016lX\n", pfn);
728f3a39818SAndrew Lewycky 	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
729f3a39818SAndrew Lewycky 	pr_debug("     size                == 0x%08lX\n",
730f3a39818SAndrew Lewycky 			vma->vm_end - vma->vm_start);
731f3a39818SAndrew Lewycky 
732f3a39818SAndrew Lewycky 	page->user_address = (uint64_t __user *)vma->vm_start;
733f3a39818SAndrew Lewycky 
734f3a39818SAndrew Lewycky 	/* Map the page into the user process address space */
735f3a39818SAndrew Lewycky 	return remap_pfn_range(vma, vma->vm_start, pfn,
736f3a39818SAndrew Lewycky 			vma->vm_end - vma->vm_start, vma->vm_page_prot);
737f3a39818SAndrew Lewycky }
73859d3e8beSAlexey Skidanov 
73959d3e8beSAlexey Skidanov /*
74059d3e8beSAlexey Skidanov  * Assumes that p->event_mutex is held and of course
74159d3e8beSAlexey Skidanov  * that p is not going away (current or locked).
74259d3e8beSAlexey Skidanov  */
74359d3e8beSAlexey Skidanov static void lookup_events_by_type_and_signal(struct kfd_process *p,
74459d3e8beSAlexey Skidanov 		int type, void *event_data)
74559d3e8beSAlexey Skidanov {
74659d3e8beSAlexey Skidanov 	struct kfd_hsa_memory_exception_data *ev_data;
74759d3e8beSAlexey Skidanov 	struct kfd_event *ev;
748*482f0777SFelix Kuehling 	uint32_t id;
74959d3e8beSAlexey Skidanov 	bool send_signal = true;
75059d3e8beSAlexey Skidanov 
75159d3e8beSAlexey Skidanov 	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
75259d3e8beSAlexey Skidanov 
753*482f0777SFelix Kuehling 	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
754*482f0777SFelix Kuehling 	idr_for_each_entry_continue(&p->event_idr, ev, id)
75559d3e8beSAlexey Skidanov 		if (ev->type == type) {
75659d3e8beSAlexey Skidanov 			send_signal = false;
75759d3e8beSAlexey Skidanov 			dev_dbg(kfd_device,
75859d3e8beSAlexey Skidanov 					"Event found: id %X type %d\n",
75959d3e8beSAlexey Skidanov 					ev->event_id, ev->type);
76059d3e8beSAlexey Skidanov 			set_event(ev);
76159d3e8beSAlexey Skidanov 			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
76259d3e8beSAlexey Skidanov 				ev->memory_exception_data = *ev_data;
76359d3e8beSAlexey Skidanov 		}
76459d3e8beSAlexey Skidanov 
76559d3e8beSAlexey Skidanov 	/* Send SIGTERM if no event of type "type" has been found */
76659d3e8beSAlexey Skidanov 	if (send_signal) {
76781663016SOded Gabbay 		if (send_sigterm) {
76859d3e8beSAlexey Skidanov 			dev_warn(kfd_device,
76959d3e8beSAlexey Skidanov 				"Sending SIGTERM to HSA Process with PID %d\n",
77059d3e8beSAlexey Skidanov 					p->lead_thread->pid);
77159d3e8beSAlexey Skidanov 			send_sig(SIGTERM, p->lead_thread, 0);
77281663016SOded Gabbay 		} else {
77381663016SOded Gabbay 			dev_err(kfd_device,
77481663016SOded Gabbay 				"HSA Process (PID %d) got unhandled exception\n",
77581663016SOded Gabbay 				p->lead_thread->pid);
77681663016SOded Gabbay 		}
77759d3e8beSAlexey Skidanov 	}
77859d3e8beSAlexey Skidanov }
77959d3e8beSAlexey Skidanov 
78059d3e8beSAlexey Skidanov void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
78159d3e8beSAlexey Skidanov 		unsigned long address, bool is_write_requested,
78259d3e8beSAlexey Skidanov 		bool is_execute_requested)
78359d3e8beSAlexey Skidanov {
78459d3e8beSAlexey Skidanov 	struct kfd_hsa_memory_exception_data memory_exception_data;
78559d3e8beSAlexey Skidanov 	struct vm_area_struct *vma;
78659d3e8beSAlexey Skidanov 
78759d3e8beSAlexey Skidanov 	/*
78859d3e8beSAlexey Skidanov 	 * Because we are called from arbitrary context (workqueue) as opposed
78959d3e8beSAlexey Skidanov 	 * to process context, kfd_process could attempt to exit while we are
79059d3e8beSAlexey Skidanov 	 * running so the lookup function returns a locked process.
79159d3e8beSAlexey Skidanov 	 */
79259d3e8beSAlexey Skidanov 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
7939b56bb11SFelix Kuehling 	struct mm_struct *mm;
79459d3e8beSAlexey Skidanov 
79559d3e8beSAlexey Skidanov 	if (!p)
79659d3e8beSAlexey Skidanov 		return; /* Presumably process exited. */
79759d3e8beSAlexey Skidanov 
7989b56bb11SFelix Kuehling 	/* Take a safe reference to the mm_struct, which may otherwise
7999b56bb11SFelix Kuehling 	 * disappear even while the kfd_process is still referenced.
8009b56bb11SFelix Kuehling 	 */
8019b56bb11SFelix Kuehling 	mm = get_task_mm(p->lead_thread);
8029b56bb11SFelix Kuehling 	if (!mm) {
8039b56bb11SFelix Kuehling 		mutex_unlock(&p->mutex);
8049b56bb11SFelix Kuehling 		return; /* Process is exiting */
8059b56bb11SFelix Kuehling 	}
8069b56bb11SFelix Kuehling 
80759d3e8beSAlexey Skidanov 	memset(&memory_exception_data, 0, sizeof(memory_exception_data));
80859d3e8beSAlexey Skidanov 
8099b56bb11SFelix Kuehling 	down_read(&mm->mmap_sem);
8109b56bb11SFelix Kuehling 	vma = find_vma(mm, address);
81159d3e8beSAlexey Skidanov 
81259d3e8beSAlexey Skidanov 	memory_exception_data.gpu_id = dev->id;
81359d3e8beSAlexey Skidanov 	memory_exception_data.va = address;
81459d3e8beSAlexey Skidanov 	/* Set failure reason */
81559d3e8beSAlexey Skidanov 	memory_exception_data.failure.NotPresent = 1;
81659d3e8beSAlexey Skidanov 	memory_exception_data.failure.NoExecute = 0;
81759d3e8beSAlexey Skidanov 	memory_exception_data.failure.ReadOnly = 0;
81859d3e8beSAlexey Skidanov 	if (vma) {
81959d3e8beSAlexey Skidanov 		if (vma->vm_start > address) {
82059d3e8beSAlexey Skidanov 			memory_exception_data.failure.NotPresent = 1;
82159d3e8beSAlexey Skidanov 			memory_exception_data.failure.NoExecute = 0;
82259d3e8beSAlexey Skidanov 			memory_exception_data.failure.ReadOnly = 0;
82359d3e8beSAlexey Skidanov 		} else {
82459d3e8beSAlexey Skidanov 			memory_exception_data.failure.NotPresent = 0;
82559d3e8beSAlexey Skidanov 			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
82659d3e8beSAlexey Skidanov 				memory_exception_data.failure.ReadOnly = 1;
82759d3e8beSAlexey Skidanov 			else
82859d3e8beSAlexey Skidanov 				memory_exception_data.failure.ReadOnly = 0;
82959d3e8beSAlexey Skidanov 			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
83059d3e8beSAlexey Skidanov 				memory_exception_data.failure.NoExecute = 1;
83159d3e8beSAlexey Skidanov 			else
83259d3e8beSAlexey Skidanov 				memory_exception_data.failure.NoExecute = 0;
83359d3e8beSAlexey Skidanov 		}
83459d3e8beSAlexey Skidanov 	}
83559d3e8beSAlexey Skidanov 
8369b56bb11SFelix Kuehling 	up_read(&mm->mmap_sem);
8379b56bb11SFelix Kuehling 	mmput(mm);
83859d3e8beSAlexey Skidanov 
83959d3e8beSAlexey Skidanov 	mutex_lock(&p->event_mutex);
84059d3e8beSAlexey Skidanov 
84159d3e8beSAlexey Skidanov 	/* Look up events by type and signal them */
84259d3e8beSAlexey Skidanov 	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
84359d3e8beSAlexey Skidanov 			&memory_exception_data);
84459d3e8beSAlexey Skidanov 
84559d3e8beSAlexey Skidanov 	mutex_unlock(&p->event_mutex);
84659d3e8beSAlexey Skidanov 	mutex_unlock(&p->mutex);
84759d3e8beSAlexey Skidanov }
848930c5ff4SAlexey Skidanov 
849930c5ff4SAlexey Skidanov void kfd_signal_hw_exception_event(unsigned int pasid)
850930c5ff4SAlexey Skidanov {
851930c5ff4SAlexey Skidanov 	/*
852930c5ff4SAlexey Skidanov 	 * Because we are called from arbitrary context (workqueue) as opposed
853930c5ff4SAlexey Skidanov 	 * to process context, kfd_process could attempt to exit while we are
854930c5ff4SAlexey Skidanov 	 * running so the lookup function returns a locked process.
855930c5ff4SAlexey Skidanov 	 */
856930c5ff4SAlexey Skidanov 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
857930c5ff4SAlexey Skidanov 
858930c5ff4SAlexey Skidanov 	if (!p)
859930c5ff4SAlexey Skidanov 		return; /* Presumably process exited. */
860930c5ff4SAlexey Skidanov 
861930c5ff4SAlexey Skidanov 	mutex_lock(&p->event_mutex);
862930c5ff4SAlexey Skidanov 
863930c5ff4SAlexey Skidanov 	/* Look up events by type and signal them */
864930c5ff4SAlexey Skidanov 	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
865930c5ff4SAlexey Skidanov 
866930c5ff4SAlexey Skidanov 	mutex_unlock(&p->event_mutex);
867930c5ff4SAlexey Skidanov 	mutex_unlock(&p->mutex);
868930c5ff4SAlexey Skidanov }
869