xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_events.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/mm_types.h>
25 #include <linux/slab.h>
26 #include <linux/types.h>
27 #include <linux/sched/signal.h>
28 #include <linux/sched/mm.h>
29 #include <linux/uaccess.h>
30 #include <linux/mman.h>
31 #include <linux/memory.h>
32 #include "kfd_priv.h"
33 #include "kfd_events.h"
34 #include "kfd_device_queue_manager.h"
35 #include <linux/device.h>
36 
37 /*
38  * Wrapper around wait_queue_entry_t
39  */
40 struct kfd_event_waiter {
41 	wait_queue_entry_t wait;
42 	struct kfd_event *event; /* Event to wait for */
43 	bool activated;		 /* Becomes true when event is signaled */
44 	bool event_age_enabled;  /* set to true when last_event_age is non-zero */
45 };
46 
47 /*
48  * Each signal event needs a 64-bit signal slot where the signaler will write
49  * a 1 before sending an interrupt. (This is needed because some interrupts
50  * do not contain enough spare data bits to identify an event.)
51  * We get whole pages and map them to the process VA.
52  * Individual signal events use their event_id as slot index.
53  */
54 struct kfd_signal_page {
55 	uint64_t *kernel_address;
56 	uint64_t __user *user_address;
57 	bool need_to_free_pages;
58 };
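
/*
 * Illustration only, not part of the driver: a hedged sketch of the signal
 * protocol implied by the comment above. User mode (typically the HSA/ROCm
 * runtime) writes into its mapped view of the slot before raising an
 * interrupt; the names below are hypothetical.
 *
 *	uint64_t *slots = mapped_signal_page;	// from kfd_event_mmap()
 *	slots[event_id] = event_trigger_data;	// mark the slot signaled
 *	send_event_interrupt(event_id);		// hypothetical helper
 *
 * The kernel clears the slot back to UNSIGNALED_EVENT_SLOT in
 * acknowledge_signal() once the event has been delivered.
 */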
59 
60 static uint64_t *page_slots(struct kfd_signal_page *page)
61 {
62 	return page->kernel_address;
63 }
64 
65 static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
66 {
67 	void *backing_store;
68 	struct kfd_signal_page *page;
69 
70 	page = kzalloc(sizeof(*page), GFP_KERNEL);
71 	if (!page)
72 		return NULL;
73 
74 	backing_store = (void *) __get_free_pages(GFP_KERNEL,
75 					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
76 	if (!backing_store)
77 		goto fail_alloc_signal_store;
78 
79 	/* Initialize all events to unsignaled */
80 	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
81 	       KFD_SIGNAL_EVENT_LIMIT * 8);
82 
83 	page->kernel_address = backing_store;
84 	page->need_to_free_pages = true;
85 	pr_debug("Allocated new event signal page at %p, for process %p\n",
86 			page, p);
87 
88 	return page;
89 
90 fail_alloc_signal_store:
91 	kfree(page);
92 	return NULL;
93 }
94 
95 static int allocate_event_notification_slot(struct kfd_process *p,
96 					    struct kfd_event *ev,
97 					    const int *restore_id)
98 {
99 	int id;
100 
101 	if (!p->signal_page) {
102 		p->signal_page = allocate_signal_page(p);
103 		if (!p->signal_page)
104 			return -ENOMEM;
105 		/* Oldest user mode expects 256 event slots */
106 		p->signal_mapped_size = 256*8;
107 	}
108 
109 	if (restore_id) {
110 		id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
111 				GFP_KERNEL);
112 	} else {
113 		/*
114 		 * Compatibility with old user mode: Only use signal slots
115 		 * user mode has mapped, may be less than
116 		 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
117 		 * of the event limit without breaking user mode.
118 		 */
119 		id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
120 				GFP_KERNEL);
121 	}
122 	if (id < 0)
123 		return id;
124 
125 	ev->event_id = id;
126 	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;
127 
128 	return 0;
129 }
130 
131 /*
132  * Assumes that p->event_mutex or rcu_read_lock is held and of course that p is
133  * not going away.
134  */
135 static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
136 {
137 	return idr_find(&p->event_idr, id);
138 }
139 
140 /**
141  * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
142  * @p:     Pointer to struct kfd_process
143  * @id:    ID to look up
144  * @bits:  Number of valid bits in @id
145  *
146  * Finds the first signaled event with a matching partial ID. If no
147  * matching signaled event is found, returns NULL. In that case the
148  * caller should assume that the partial ID is invalid and do an
149  * exhaustive search of all signaled events.
150  *
151  * If multiple events with the same partial ID signal at the same
152  * time, they will be found one interrupt at a time, not necessarily
153  * in the same order the interrupts occurred. As long as the number of
154  * interrupts is correct, all signaled events will be seen by the
155  * driver.
156  */
157 static struct kfd_event *lookup_signaled_event_by_partial_id(
158 	struct kfd_process *p, uint32_t id, uint32_t bits)
159 {
160 	struct kfd_event *ev;
161 
162 	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
163 		return NULL;
164 
165 	/* Fast path for the common case that @id is not a partial ID
166 	 * and we only need a single lookup.
167 	 */
168 	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
169 		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
170 			return NULL;
171 
172 		return idr_find(&p->event_idr, id);
173 	}
174 
175 	/* General case for partial IDs: Iterate over all matching IDs
176 	 * and find the first one that has signaled.
177 	 */
178 	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
179 		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
180 			continue;
181 
182 		ev = idr_find(&p->event_idr, id);
183 	}
184 
185 	return ev;
186 }
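
/*
 * Worked example for the partial-ID search above (illustrative values): with
 * bits = 8 and id = 0x12, the candidate event IDs are 0x12, 0x112, 0x212, ...
 * stepping by 1U << bits up to KFD_SIGNAL_EVENT_LIMIT. The first candidate
 * whose signal slot is not UNSIGNALED_EVENT_SLOT and that exists in the IDR
 * is returned; if none matches, kfd_signal_event_interrupt() falls back to an
 * exhaustive scan.
 */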
187 
188 static int create_signal_event(struct file *devkfd, struct kfd_process *p,
189 				struct kfd_event *ev, const int *restore_id)
190 {
191 	int ret;
192 
193 	if (p->signal_mapped_size &&
194 	    p->signal_event_count == p->signal_mapped_size / 8) {
195 		if (!p->signal_event_limit_reached) {
196 			pr_debug("Signal event wasn't created because limit was reached\n");
197 			p->signal_event_limit_reached = true;
198 		}
199 		return -ENOSPC;
200 	}
201 
202 	ret = allocate_event_notification_slot(p, ev, restore_id);
203 	if (ret) {
204 		pr_warn("Signal event wasn't created because out of kernel memory\n");
205 		return ret;
206 	}
207 
208 	p->signal_event_count++;
209 
210 	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
211 	pr_debug("Signal event number %zu created with id %d, address %p\n",
212 			p->signal_event_count, ev->event_id,
213 			ev->user_signal_address);
214 
215 	return 0;
216 }
217 
218 static int create_other_event(struct kfd_process *p, struct kfd_event *ev, const int *restore_id)
219 {
220 	int id;
221 
222 	if (restore_id)
223 		id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
224 			GFP_KERNEL);
225 	else
226 		/* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
227 		 * intentional integer overflow to -1 without a compiler
228 		 * warning. idr_alloc treats a negative value as "maximum
229 		 * signed integer".
230 		 */
231 		id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
232 				(uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
233 				GFP_KERNEL);
234 
235 	if (id < 0)
236 		return id;
237 	ev->event_id = id;
238 
239 	return 0;
240 }
241 
242 int kfd_event_init_process(struct kfd_process *p)
243 {
244 	int id;
245 
246 	mutex_init(&p->event_mutex);
247 	idr_init(&p->event_idr);
248 	p->signal_page = NULL;
249 	p->signal_event_count = 1;
250 	/* Allocate event ID 0. It is used for a fast path to ignore bogus events
251 	 * that are sent by the CP without a context ID
252 	 */
253 	id = idr_alloc(&p->event_idr, NULL, 0, 1, GFP_KERNEL);
254 	if (id < 0) {
255 		idr_destroy(&p->event_idr);
256 		mutex_destroy(&p->event_mutex);
257 		return id;
258 	}
259 	return 0;
260 }
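
/*
 * Note on the reserved entry above: idr_alloc(..., NULL, 0, 1, ...) permanently
 * occupies event ID 0 with a NULL pointer, so no real event can be created
 * with that ID. Any lookup of ID 0 (e.g. via lookup_event_by_id()) returns
 * NULL, which callers such as set_event_from_interrupt() treat as "no event".
 */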
261 
262 static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
263 {
264 	struct kfd_event_waiter *waiter;
265 
266 	/* Wake up pending waiters. They will return failure */
267 	spin_lock(&ev->lock);
268 	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
269 		WRITE_ONCE(waiter->event, NULL);
270 	wake_up_all(&ev->wq);
271 	spin_unlock(&ev->lock);
272 
273 	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
274 	    ev->type == KFD_EVENT_TYPE_DEBUG)
275 		p->signal_event_count--;
276 
277 	idr_remove(&p->event_idr, ev->event_id);
278 	kfree_rcu(ev, rcu);
279 }
280 
281 static void destroy_events(struct kfd_process *p)
282 {
283 	struct kfd_event *ev;
284 	uint32_t id;
285 
286 	idr_for_each_entry(&p->event_idr, ev, id)
287 		if (ev)
288 			destroy_event(p, ev);
289 	idr_destroy(&p->event_idr);
290 	mutex_destroy(&p->event_mutex);
291 }
292 
293 /*
294  * We assume that the process is being destroyed and there is no need to
295  * unmap the pages or keep bookkeeping data in order.
296  */
297 static void shutdown_signal_page(struct kfd_process *p)
298 {
299 	struct kfd_signal_page *page = p->signal_page;
300 
301 	if (page) {
302 		if (page->need_to_free_pages)
303 			free_pages((unsigned long)page->kernel_address,
304 				   get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
305 		kfree(page);
306 	}
307 }
308 
309 void kfd_event_free_process(struct kfd_process *p)
310 {
311 	destroy_events(p);
312 	shutdown_signal_page(p);
313 }
314 
315 static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
316 {
317 	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
318 					ev->type == KFD_EVENT_TYPE_DEBUG;
319 }
320 
321 static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
322 {
323 	return ev->type == KFD_EVENT_TYPE_SIGNAL;
324 }
325 
326 static int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
327 		       uint64_t size, uint64_t user_handle)
328 {
329 	struct kfd_signal_page *page;
330 
331 	if (p->signal_page)
332 		return -EBUSY;
333 
334 	if (size < KFD_SIGNAL_EVENT_LIMIT * 8) {
335 		pr_err("Event page size %llu is too small, need at least %lu bytes\n",
336 				size, (unsigned long)(KFD_SIGNAL_EVENT_LIMIT * 8));
337 		return -EINVAL;
338 	}
339 
340 	page = kzalloc(sizeof(*page), GFP_KERNEL);
341 	if (!page)
342 		return -ENOMEM;
343 
344 	/* Initialize all events to unsignaled */
345 	memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
346 	       KFD_SIGNAL_EVENT_LIMIT * 8);
347 
348 	page->kernel_address = kernel_address;
349 
350 	p->signal_page = page;
351 	p->signal_mapped_size = size;
352 	p->signal_handle = user_handle;
353 	return 0;
354 }
355 
356 int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset)
357 {
358 	struct kfd_node *kfd;
359 	struct kfd_process_device *pdd;
360 	void *mem, *kern_addr;
361 	uint64_t size;
362 	int err = 0;
363 
364 	if (p->signal_page) {
365 		pr_err("Event page is already set\n");
366 		return -EINVAL;
367 	}
368 
369 	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(event_page_offset));
370 	if (!pdd) {
371 		pr_err("Getting device by id failed in %s\n", __func__);
372 		return -EINVAL;
373 	}
374 	kfd = pdd->dev;
375 
376 	pdd = kfd_bind_process_to_device(kfd, p);
377 	if (IS_ERR(pdd))
378 		return PTR_ERR(pdd);
379 
380 	mem = kfd_process_device_translate_handle(pdd,
381 			GET_IDR_HANDLE(event_page_offset));
382 	if (!mem) {
383 		pr_err("Can't find BO, offset is 0x%llx\n", event_page_offset);
384 		return -EINVAL;
385 	}
386 
387 	err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &kern_addr, &size);
388 	if (err) {
389 		pr_err("Failed to map event page to kernel\n");
390 		return err;
391 	}
392 
393 	err = kfd_event_page_set(p, kern_addr, size, event_page_offset);
394 	if (err) {
395 		pr_err("Failed to set event page\n");
396 		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
397 		return err;
398 	}
399 	return err;
400 }
401 
402 int kfd_event_create(struct file *devkfd, struct kfd_process *p,
403 		     uint32_t event_type, bool auto_reset, uint32_t node_id,
404 		     uint32_t *event_id, uint32_t *event_trigger_data,
405 		     uint64_t *event_page_offset, uint32_t *event_slot_index)
406 {
407 	int ret = 0;
408 	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
409 
410 	if (!ev)
411 		return -ENOMEM;
412 
413 	ev->type = event_type;
414 	ev->auto_reset = auto_reset;
415 	ev->signaled = false;
416 
417 	spin_lock_init(&ev->lock);
418 	init_waitqueue_head(&ev->wq);
419 
420 	*event_page_offset = 0;
421 
422 	mutex_lock(&p->event_mutex);
423 
424 	switch (event_type) {
425 	case KFD_EVENT_TYPE_SIGNAL:
426 	case KFD_EVENT_TYPE_DEBUG:
427 		ret = create_signal_event(devkfd, p, ev, NULL);
428 		if (!ret) {
429 			*event_page_offset = KFD_MMAP_TYPE_EVENTS;
430 			*event_slot_index = ev->event_id;
431 		}
432 		break;
433 	default:
434 		ret = create_other_event(p, ev, NULL);
435 		break;
436 	}
437 
438 	if (!ret) {
439 		*event_id = ev->event_id;
440 		*event_trigger_data = ev->event_id;
441 		ev->event_age = 1;
442 	} else {
443 		kfree(ev);
444 	}
445 
446 	mutex_unlock(&p->event_mutex);
447 
448 	return ret;
449 }
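
/*
 * Illustration only: a hedged sketch of how user space is expected to consume
 * the outputs of kfd_event_create() for a signal event. The exact argument
 * struct lives in the KFD UAPI header; the field names below are assumptions.
 *
 *	args.event_type = KFD_IOC_EVENT_SIGNAL;
 *	ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &args);
 *	// args.event_page_offset is then passed to mmap() of /dev/kfd and
 *	// args.event_slot_index selects this event's 64-bit signal slot.
 */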
450 
451 int kfd_criu_restore_event(struct file *devkfd,
452 			   struct kfd_process *p,
453 			   uint8_t __user *user_priv_ptr,
454 			   uint64_t *priv_data_offset,
455 			   uint64_t max_priv_data_size)
456 {
457 	struct kfd_criu_event_priv_data *ev_priv;
458 	struct kfd_event *ev = NULL;
459 	int ret = 0;
460 
461 	ev_priv = kmalloc(sizeof(*ev_priv), GFP_KERNEL);
462 	if (!ev_priv)
463 		return -ENOMEM;
464 
465 	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
466 	if (!ev) {
467 		ret = -ENOMEM;
468 		goto exit;
469 	}
470 
471 	if (*priv_data_offset + sizeof(*ev_priv) > max_priv_data_size) {
472 		ret = -EINVAL;
473 		goto exit;
474 	}
475 
476 	ret = copy_from_user(ev_priv, user_priv_ptr + *priv_data_offset, sizeof(*ev_priv));
477 	if (ret) {
478 		ret = -EFAULT;
479 		goto exit;
480 	}
481 	*priv_data_offset += sizeof(*ev_priv);
482 
483 	if (ev_priv->user_handle) {
484 		ret = kfd_kmap_event_page(p, ev_priv->user_handle);
485 		if (ret)
486 			goto exit;
487 	}
488 
489 	ev->type = ev_priv->type;
490 	ev->auto_reset = ev_priv->auto_reset;
491 	ev->signaled = ev_priv->signaled;
492 
493 	spin_lock_init(&ev->lock);
494 	init_waitqueue_head(&ev->wq);
495 
496 	mutex_lock(&p->event_mutex);
497 	switch (ev->type) {
498 	case KFD_EVENT_TYPE_SIGNAL:
499 	case KFD_EVENT_TYPE_DEBUG:
500 		ret = create_signal_event(devkfd, p, ev, &ev_priv->event_id);
501 		break;
502 	case KFD_EVENT_TYPE_MEMORY:
503 		memcpy(&ev->memory_exception_data,
504 			&ev_priv->memory_exception_data,
505 			sizeof(struct kfd_hsa_memory_exception_data));
506 
507 		ret = create_other_event(p, ev, &ev_priv->event_id);
508 		break;
509 	case KFD_EVENT_TYPE_HW_EXCEPTION:
510 		memcpy(&ev->hw_exception_data,
511 			&ev_priv->hw_exception_data,
512 			sizeof(struct kfd_hsa_hw_exception_data));
513 
514 		ret = create_other_event(p, ev, &ev_priv->event_id);
515 		break;
516 	}
517 	mutex_unlock(&p->event_mutex);
518 
519 exit:
520 	if (ret)
521 		kfree(ev);
522 
523 	kfree(ev_priv);
524 
525 	return ret;
526 }
527 
528 int kfd_criu_checkpoint_events(struct kfd_process *p,
529 			 uint8_t __user *user_priv_data,
530 			 uint64_t *priv_data_offset)
531 {
532 	struct kfd_criu_event_priv_data *ev_privs;
533 	int i = 0;
534 	int ret =  0;
535 	struct kfd_event *ev;
536 	uint32_t ev_id;
537 
538 	uint32_t num_events = kfd_get_num_events(p);
539 
540 	if (!num_events)
541 		return 0;
542 
543 	ev_privs = kvzalloc(num_events * sizeof(*ev_privs), GFP_KERNEL);
544 	if (!ev_privs)
545 		return -ENOMEM;
546 
547 
548 	idr_for_each_entry(&p->event_idr, ev, ev_id) {
549 		struct kfd_criu_event_priv_data *ev_priv;
550 
551 		/*
552 		 * Currently all events have the same size of private_data, but the
553 		 * current ioctl and the CRIU plugin support private_data of variable sizes
554 		 */
555 		ev_priv = &ev_privs[i];
556 
557 		ev_priv->object_type = KFD_CRIU_OBJECT_TYPE_EVENT;
558 
559 		/* We store the user_handle with the first event */
560 		if (i == 0 && p->signal_page)
561 			ev_priv->user_handle = p->signal_handle;
562 
563 		ev_priv->event_id = ev->event_id;
564 		ev_priv->auto_reset = ev->auto_reset;
565 		ev_priv->type = ev->type;
566 		ev_priv->signaled = ev->signaled;
567 
568 		if (ev_priv->type == KFD_EVENT_TYPE_MEMORY)
569 			memcpy(&ev_priv->memory_exception_data,
570 				&ev->memory_exception_data,
571 				sizeof(struct kfd_hsa_memory_exception_data));
572 		else if (ev_priv->type == KFD_EVENT_TYPE_HW_EXCEPTION)
573 			memcpy(&ev_priv->hw_exception_data,
574 				&ev->hw_exception_data,
575 				sizeof(struct kfd_hsa_hw_exception_data));
576 
577 		pr_debug("Checkpointed event[%d] id = 0x%08x auto_reset = %x type = %x signaled = %x\n",
578 			  i,
579 			  ev_priv->event_id,
580 			  ev_priv->auto_reset,
581 			  ev_priv->type,
582 			  ev_priv->signaled);
583 		i++;
584 	}
585 
586 	ret = copy_to_user(user_priv_data + *priv_data_offset,
587 			   ev_privs, num_events * sizeof(*ev_privs));
588 	if (ret) {
589 		pr_err("Failed to copy events priv to user\n");
590 		ret = -EFAULT;
591 	}
592 
593 	*priv_data_offset += num_events * sizeof(*ev_privs);
594 
595 	kvfree(ev_privs);
596 	return ret;
597 }
598 
599 int kfd_get_num_events(struct kfd_process *p)
600 {
601 	struct kfd_event *ev;
602 	uint32_t id;
603 	u32 num_events = 0;
604 
605 	idr_for_each_entry(&p->event_idr, ev, id)
606 		num_events++;
607 
608 	return num_events;
609 }
610 
611 /* Assumes that p is current. */
612 int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
613 {
614 	struct kfd_event *ev;
615 	int ret = 0;
616 
617 	mutex_lock(&p->event_mutex);
618 
619 	ev = lookup_event_by_id(p, event_id);
620 
621 	if (ev)
622 		destroy_event(p, ev);
623 	else
624 		ret = -EINVAL;
625 
626 	mutex_unlock(&p->event_mutex);
627 	return ret;
628 }
629 
630 static void set_event(struct kfd_event *ev)
631 {
632 	struct kfd_event_waiter *waiter;
633 
634 	/* Auto reset if the list is non-empty and we're waking
635 	 * someone. waitqueue_active is safe here because we're
636 	 * protected by the ev->lock, which is also held when
637 	 * updating the wait queues in kfd_wait_on_events.
638 	 */
639 	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
640 	if (!(++ev->event_age)) {
641 		/* Never wrap back to reserved/default event age 0/1 */
642 		ev->event_age = 2;
643 		WARN_ONCE(1, "event_age wrap back!");
644 	}
645 
646 	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
647 		WRITE_ONCE(waiter->activated, true);
648 
649 	wake_up_all(&ev->wq);
650 }
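
/*
 * Behaviour sketch for set_event() above: for an auto-reset event with
 * waiters queued, ev->signaled stays false and only the currently queued
 * waiters are activated and woken; for a non-auto-reset event, or when
 * nobody is waiting, the event stays latched as signaled until
 * kfd_reset_event() or a later auto-reset wait consumes it.
 */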
651 
652 /* Assumes that p is current. */
653 int kfd_set_event(struct kfd_process *p, uint32_t event_id)
654 {
655 	int ret = 0;
656 	struct kfd_event *ev;
657 
658 	rcu_read_lock();
659 
660 	ev = lookup_event_by_id(p, event_id);
661 	if (!ev) {
662 		ret = -EINVAL;
663 		goto unlock_rcu;
664 	}
665 	spin_lock(&ev->lock);
666 
667 	if (event_can_be_cpu_signaled(ev))
668 		set_event(ev);
669 	else
670 		ret = -EINVAL;
671 
672 	spin_unlock(&ev->lock);
673 unlock_rcu:
674 	rcu_read_unlock();
675 	return ret;
676 }
677 
678 static void reset_event(struct kfd_event *ev)
679 {
680 	ev->signaled = false;
681 }
682 
683 /* Assumes that p is current. */
684 int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
685 {
686 	int ret = 0;
687 	struct kfd_event *ev;
688 
689 	rcu_read_lock();
690 
691 	ev = lookup_event_by_id(p, event_id);
692 	if (!ev) {
693 		ret = -EINVAL;
694 		goto unlock_rcu;
695 	}
696 	spin_lock(&ev->lock);
697 
698 	if (event_can_be_cpu_signaled(ev))
699 		reset_event(ev);
700 	else
701 		ret = -EINVAL;
702 
703 	spin_unlock(&ev->lock);
704 unlock_rcu:
705 	rcu_read_unlock();
706 	return ret;
707 
708 }
709 
710 static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
711 {
712 	WRITE_ONCE(page_slots(p->signal_page)[ev->event_id], UNSIGNALED_EVENT_SLOT);
713 }
714 
715 static void set_event_from_interrupt(struct kfd_process *p,
716 					struct kfd_event *ev)
717 {
718 	if (ev && event_can_be_gpu_signaled(ev)) {
719 		acknowledge_signal(p, ev);
720 		spin_lock(&ev->lock);
721 		set_event(ev);
722 		spin_unlock(&ev->lock);
723 	}
724 }
725 
726 void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
727 				uint32_t valid_id_bits)
728 {
729 	struct kfd_event *ev = NULL;
730 
731 	/*
732 	 * Because we are called from arbitrary context (workqueue) as opposed
733 	 * to process context, kfd_process could attempt to exit while we are
734 	 * running so the lookup function increments the process ref count.
735 	 */
736 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
737 
738 	if (!p)
739 		return; /* Presumably process exited. */
740 
741 	rcu_read_lock();
742 
743 	if (valid_id_bits)
744 		ev = lookup_signaled_event_by_partial_id(p, partial_id,
745 							 valid_id_bits);
746 	if (ev) {
747 		set_event_from_interrupt(p, ev);
748 	} else if (p->signal_page) {
749 		/*
750 		 * Partial ID lookup failed. Assume that the event ID
751 		 * in the interrupt payload was invalid and do an
752 		 * exhaustive search of signaled events.
753 		 */
754 		uint64_t *slots = page_slots(p->signal_page);
755 		uint32_t id;
756 
757 		if (valid_id_bits)
758 			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
759 					     partial_id, valid_id_bits);
760 
761 		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
762 			/* With relatively few events, it's faster to
763 			 * iterate over the event IDR
764 			 */
765 			idr_for_each_entry(&p->event_idr, ev, id) {
766 				if (id >= KFD_SIGNAL_EVENT_LIMIT)
767 					break;
768 
769 				if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT)
770 					set_event_from_interrupt(p, ev);
771 			}
772 		} else {
773 			/* With relatively many events, it's faster to
774 			 * iterate over the signal slots and lookup
775 			 * only signaled events from the IDR.
776 			 */
777 			for (id = 1; id < KFD_SIGNAL_EVENT_LIMIT; id++)
778 				if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT) {
779 					ev = lookup_event_by_id(p, id);
780 					set_event_from_interrupt(p, ev);
781 				}
782 		}
783 	}
784 
785 	rcu_read_unlock();
786 	kfd_unref_process(p);
787 }
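
/*
 * Sizing note for the heuristic above: assuming the usual
 * KFD_SIGNAL_EVENT_LIMIT of 4096 slots (defined in kfd_priv.h), the IDR walk
 * is used while fewer than 64 signal events exist and the linear slot scan is
 * used beyond that, switching roughly where scanning every slot becomes
 * cheaper than many individual IDR lookups.
 */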
788 
789 static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
790 {
791 	struct kfd_event_waiter *event_waiters;
792 	uint32_t i;
793 
794 	event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter), GFP_KERNEL);
795 	if (!event_waiters)
796 		return NULL;
797 
798 	for (i = 0; i < num_events; i++)
799 		init_wait(&event_waiters[i].wait);
800 
801 	return event_waiters;
802 }
803 
804 static int init_event_waiter(struct kfd_process *p,
805 		struct kfd_event_waiter *waiter,
806 		struct kfd_event_data *event_data)
807 {
808 	struct kfd_event *ev = lookup_event_by_id(p, event_data->event_id);
809 
810 	if (!ev)
811 		return -EINVAL;
812 
813 	spin_lock(&ev->lock);
814 	waiter->event = ev;
815 	waiter->activated = ev->signaled;
816 	ev->signaled = ev->signaled && !ev->auto_reset;
817 
818 	/* last_event_age = 0 is reserved for backward compatibility */
819 	if (waiter->event->type == KFD_EVENT_TYPE_SIGNAL &&
820 		event_data->signal_event_data.last_event_age) {
821 		waiter->event_age_enabled = true;
822 		if (ev->event_age != event_data->signal_event_data.last_event_age)
823 			waiter->activated = true;
824 	}
825 
826 	if (!waiter->activated)
827 		add_wait_queue(&ev->wq, &waiter->wait);
828 	spin_unlock(&ev->lock);
829 
830 	return 0;
831 }
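
/*
 * Example of the event_age handshake above (values are illustrative): a new
 * event starts with event_age = 1 (see kfd_event_create()) and set_event()
 * increments it on every signal, skipping the reserved values 0 and 1 on
 * wrap-around. If user space last observed age 3 and passes
 * last_event_age = 3 while ev->event_age is already 4, the comparison above
 * marks the waiter activated immediately, so a signal delivered between the
 * user-space read and this wait is not lost.
 */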
832 
833 /* test_event_condition - Test condition of events being waited for
834  * @all:           Return completion only if all events have signaled
835  * @num_events:    Number of events to wait for
836  * @event_waiters: Array of event waiters, one per event
837  *
838  * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
839  * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
840  * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
841  * the events have been destroyed.
842  */
843 static uint32_t test_event_condition(bool all, uint32_t num_events,
844 				struct kfd_event_waiter *event_waiters)
845 {
846 	uint32_t i;
847 	uint32_t activated_count = 0;
848 
849 	for (i = 0; i < num_events; i++) {
850 		if (!READ_ONCE(event_waiters[i].event))
851 			return KFD_IOC_WAIT_RESULT_FAIL;
852 
853 		if (READ_ONCE(event_waiters[i].activated)) {
854 			if (!all)
855 				return KFD_IOC_WAIT_RESULT_COMPLETE;
856 
857 			activated_count++;
858 		}
859 	}
860 
861 	return activated_count == num_events ?
862 		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
863 }
864 
865 /*
866  * Copy event specific data, if defined.
867  * Currently memory exception, HW exception and signal events carry additional data to copy to user
868  */
869 static int copy_signaled_event_data(uint32_t num_events,
870 		struct kfd_event_waiter *event_waiters,
871 		struct kfd_event_data __user *data)
872 {
873 	void *src;
874 	void __user *dst;
875 	struct kfd_event_waiter *waiter;
876 	struct kfd_event *event;
877 	uint32_t i, size = 0;
878 
879 	for (i = 0; i < num_events; i++) {
880 		waiter = &event_waiters[i];
881 		event = waiter->event;
882 		if (!event)
883 			return -EINVAL; /* event was destroyed */
884 		if (waiter->activated) {
885 			if (event->type == KFD_EVENT_TYPE_MEMORY) {
886 				dst = &data[i].memory_exception_data;
887 				src = &event->memory_exception_data;
888 				size = sizeof(struct kfd_hsa_memory_exception_data);
889 			} else if (event->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
890 				dst = &data[i].memory_exception_data;
891 				src = &event->hw_exception_data;
892 				size = sizeof(struct kfd_hsa_hw_exception_data);
893 			} else if (event->type == KFD_EVENT_TYPE_SIGNAL &&
894 				waiter->event_age_enabled) {
895 				dst = &data[i].signal_event_data.last_event_age;
896 				src = &event->event_age;
897 				size = sizeof(u64);
898 			}
899 			if (size && copy_to_user(dst, src, size))
900 				return -EFAULT;
901 		}
902 	}
903 
904 	return 0;
905 }
906 
907 static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
908 {
909 	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
910 		return 0;
911 
912 	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
913 		return MAX_SCHEDULE_TIMEOUT;
914 
915 	/*
916 	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
917 	 * but we consider them finite.
918 	 * This hack is wrong, but nobody is likely to notice.
919 	 */
920 	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);
921 
922 	return msecs_to_jiffies(user_timeout_ms) + 1;
923 }
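
/*
 * Example conversions for the helper above (jiffies values depend on HZ):
 * KFD_EVENT_TIMEOUT_IMMEDIATE maps to 0 (poll once),
 * KFD_EVENT_TIMEOUT_INFINITE maps to MAX_SCHEDULE_TIMEOUT, and an ordinary
 * value such as 1000 ms becomes msecs_to_jiffies(1000) + 1, i.e. about one
 * second plus one tick of slack.
 */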
924 
925 static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
926 			 bool undo_auto_reset)
927 {
928 	uint32_t i;
929 
930 	for (i = 0; i < num_events; i++)
931 		if (waiters[i].event) {
932 			spin_lock(&waiters[i].event->lock);
933 			remove_wait_queue(&waiters[i].event->wq,
934 					  &waiters[i].wait);
935 			if (undo_auto_reset && waiters[i].activated &&
936 			    waiters[i].event && waiters[i].event->auto_reset)
937 				set_event(waiters[i].event);
938 			spin_unlock(&waiters[i].event->lock);
939 		}
940 
941 	kfree(waiters);
942 }
943 
944 int kfd_wait_on_events(struct kfd_process *p,
945 		       uint32_t num_events, void __user *data,
946 		       bool all, uint32_t *user_timeout_ms,
947 		       uint32_t *wait_result)
948 {
949 	struct kfd_event_data __user *events =
950 			(struct kfd_event_data __user *) data;
951 	uint32_t i;
952 	int ret = 0;
953 
954 	struct kfd_event_waiter *event_waiters = NULL;
955 	long timeout = user_timeout_to_jiffies(*user_timeout_ms);
956 
957 	event_waiters = alloc_event_waiters(num_events);
958 	if (!event_waiters) {
959 		ret = -ENOMEM;
960 		goto out;
961 	}
962 
963 	/* Use p->event_mutex here to protect against concurrent creation and
964 	 * destruction of events while we initialize event_waiters.
965 	 */
966 	mutex_lock(&p->event_mutex);
967 
968 	for (i = 0; i < num_events; i++) {
969 		struct kfd_event_data event_data;
970 
971 		if (copy_from_user(&event_data, &events[i],
972 				sizeof(struct kfd_event_data))) {
973 			ret = -EFAULT;
974 			goto out_unlock;
975 		}
976 
977 		ret = init_event_waiter(p, &event_waiters[i], &event_data);
978 		if (ret)
979 			goto out_unlock;
980 	}
981 
982 	/* Check condition once. */
983 	*wait_result = test_event_condition(all, num_events, event_waiters);
984 	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
985 		ret = copy_signaled_event_data(num_events,
986 					       event_waiters, events);
987 		goto out_unlock;
988 	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
989 		/* This should not happen. Events shouldn't be
990 		 * destroyed while we're holding the event_mutex
991 		 */
992 		goto out_unlock;
993 	}
994 
995 	mutex_unlock(&p->event_mutex);
996 
997 	while (true) {
998 		if (fatal_signal_pending(current)) {
999 			ret = -EINTR;
1000 			break;
1001 		}
1002 
1003 		if (signal_pending(current)) {
1004 			ret = -ERESTARTSYS;
1005 			if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
1006 			    *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
1007 				*user_timeout_ms = jiffies_to_msecs(
1008 					max(0l, timeout-1));
1009 			break;
1010 		}
1011 
1012 		/* Set task state to interruptible sleep before
1013 		 * checking wake-up conditions. A concurrent wake-up
1014 		 * will put the task back into runnable state. In that
1015 		 * case schedule_timeout will not put the task to
1016 		 * sleep and we'll get a chance to re-check the
1017 		 * updated conditions almost immediately. Otherwise,
1018 		 * this race condition would lead to a soft hang or a
1019 		 * very long sleep.
1020 		 */
1021 		set_current_state(TASK_INTERRUPTIBLE);
1022 
1023 		*wait_result = test_event_condition(all, num_events,
1024 						    event_waiters);
1025 		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
1026 			break;
1027 
1028 		if (timeout <= 0)
1029 			break;
1030 
1031 		timeout = schedule_timeout(timeout);
1032 	}
1033 	__set_current_state(TASK_RUNNING);
1034 
1035 	mutex_lock(&p->event_mutex);
1036 	/* copy_signaled_event_data may sleep. So this has to happen
1037 	 * after the task state is set back to RUNNING.
1038 	 *
1039 	 * The event may also have been destroyed after signaling. So
1040 	 * copy_signaled_event_data also must confirm that the event
1041 	 * still exists. Therefore this must be under the p->event_mutex
1042 	 * which is also held when events are destroyed.
1043 	 */
1044 	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
1045 		ret = copy_signaled_event_data(num_events,
1046 					       event_waiters, events);
1047 
1048 out_unlock:
1049 	free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
1050 	mutex_unlock(&p->event_mutex);
1051 out:
1052 	if (ret)
1053 		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
1054 	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
1055 		ret = -EIO;
1056 
1057 	return ret;
1058 }
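
/*
 * Note on the -ERESTARTSYS path above: when the wait is interrupted by a
 * non-fatal signal, free_waiters() is called with undo_auto_reset = true so
 * that any auto-reset event this waiter already consumed is signaled again
 * via set_event(). The restarted ioctl, using the remaining timeout written
 * back to *user_timeout_ms, can then observe the same signal instead of
 * silently losing it.
 */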
1059 
1060 int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
1061 {
1062 	unsigned long pfn;
1063 	struct kfd_signal_page *page;
1064 	int ret;
1065 
1066 	/* check required size doesn't exceed the allocated size */
1067 	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
1068 			get_order(vma->vm_end - vma->vm_start)) {
1069 		pr_err("Event page mmap requested illegal size\n");
1070 		return -EINVAL;
1071 	}
1072 
1073 	page = p->signal_page;
1074 	if (!page) {
1075 		/* Probably KFD bug, but mmap is user-accessible. */
1076 		pr_debug("Signal page could not be found\n");
1077 		return -EINVAL;
1078 	}
1079 
1080 	pfn = __pa(page->kernel_address);
1081 	pfn >>= PAGE_SHIFT;
1082 
1083 	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
1084 		       | VM_DONTDUMP | VM_PFNMAP);
1085 
1086 	pr_debug("Mapping signal page\n");
1087 	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
1088 	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
1089 	pr_debug("     pfn                 == 0x%016lX\n", pfn);
1090 	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
1091 	pr_debug("     size                == 0x%08lX\n",
1092 			vma->vm_end - vma->vm_start);
1093 
1094 	page->user_address = (uint64_t __user *)vma->vm_start;
1095 
1096 	/* mapping the page to user process */
1097 	ret = remap_pfn_range(vma, vma->vm_start, pfn,
1098 			vma->vm_end - vma->vm_start, vma->vm_page_prot);
1099 	if (!ret)
1100 		p->signal_mapped_size = vma->vm_end - vma->vm_start;
1101 
1102 	return ret;
1103 }
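
/*
 * Illustration only: the matching user-space side is expected to be a plain
 * mmap() of /dev/kfd at the event_page_offset returned by event creation,
 * e.g. (hypothetical, error handling omitted):
 *
 *	slots = mmap(NULL, slot_count * 8, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, kfd_fd, event_page_offset);
 *
 * The size check above only requires that the request not exceed the page
 * order allocated for KFD_SIGNAL_EVENT_LIMIT slots.
 */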
1104 
1105 /*
1106  * Assumes that p is not going away.
1107  */
1108 static void lookup_events_by_type_and_signal(struct kfd_process *p,
1109 		int type, void *event_data)
1110 {
1111 	struct kfd_hsa_memory_exception_data *ev_data;
1112 	struct kfd_event *ev;
1113 	uint32_t id;
1114 	bool send_signal = true;
1115 
1116 	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
1117 
1118 	rcu_read_lock();
1119 
1120 	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
1121 	idr_for_each_entry_continue(&p->event_idr, ev, id)
1122 		if (ev->type == type) {
1123 			send_signal = false;
1124 			dev_dbg(kfd_device,
1125 					"Event found: id %X type %d",
1126 					ev->event_id, ev->type);
1127 			spin_lock(&ev->lock);
1128 			set_event(ev);
1129 			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
1130 				ev->memory_exception_data = *ev_data;
1131 			spin_unlock(&ev->lock);
1132 		}
1133 
1134 	if (type == KFD_EVENT_TYPE_MEMORY) {
1135 		dev_warn(kfd_device,
1136 			"Sending SIGSEGV to process pid %d",
1137 				p->lead_thread->pid);
1138 		send_sig(SIGSEGV, p->lead_thread, 0);
1139 	}
1140 
1141 	/* Send SIGTERM if no event of type "type" has been found */
1142 	if (send_signal) {
1143 		if (send_sigterm) {
1144 			dev_warn(kfd_device,
1145 				"Sending SIGTERM to process pid %d",
1146 					p->lead_thread->pid);
1147 			send_sig(SIGTERM, p->lead_thread, 0);
1148 		} else {
1149 			dev_err(kfd_device,
1150 				"Process pid %d got unhandled exception",
1151 				p->lead_thread->pid);
1152 		}
1153 	}
1154 
1155 	rcu_read_unlock();
1156 }
1157 
1158 void kfd_signal_hw_exception_event(u32 pasid)
1159 {
1160 	/*
1161 	 * Because we are called from arbitrary context (workqueue) as opposed
1162 	 * to process context, kfd_process could attempt to exit while we are
1163 	 * running so the lookup function increments the process ref count.
1164 	 */
1165 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
1166 
1167 	if (!p)
1168 		return; /* Presumably process exited. */
1169 
1170 	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
1171 	kfd_unref_process(p);
1172 }
1173 
1174 void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va)
1175 {
1176 	struct kfd_process_device *pdd;
1177 	struct kfd_hsa_memory_exception_data exception_data;
1178 	int i;
1179 
1180 	memset(&exception_data, 0, sizeof(exception_data));
1181 	exception_data.va = gpu_va;
1182 	exception_data.failure.NotPresent = 1;
1183 
1184 	/* Send the VM fault event to all KFD process devices */
1185 	for (i = 0; i < p->n_pdds; i++) {
1186 		pdd = p->pdds[i];
1187 		exception_data.gpu_id = pdd->user_gpu_id;
1188 		kfd_evict_process_device(pdd);
1189 		kfd_signal_vm_fault_event(pdd, NULL, &exception_data);
1190 	}
1191 }
1192 
1193 void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
1194 				struct kfd_vm_fault_info *info,
1195 				struct kfd_hsa_memory_exception_data *data)
1196 {
1197 	struct kfd_event *ev;
1198 	uint32_t id;
1199 	struct kfd_process *p = pdd->process;
1200 	struct kfd_hsa_memory_exception_data memory_exception_data;
1201 	int user_gpu_id;
1202 
1203 	user_gpu_id = kfd_process_get_user_gpu_id(p, pdd->dev->id);
1204 	if (unlikely(user_gpu_id == -EINVAL)) {
1205 		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n",
1206 			  pdd->dev->id);
1207 		return;
1208 	}
1209 
1210 	/* SoC15 chips and onwards will pass in data from now on. */
1211 	if (!data) {
1212 		memset(&memory_exception_data, 0, sizeof(memory_exception_data));
1213 		memory_exception_data.gpu_id = user_gpu_id;
1214 		memory_exception_data.failure.imprecise = true;
1215 
1216 		/* Set failure reason */
1217 		if (info) {
1218 			memory_exception_data.va = (info->page_addr) <<
1219 								PAGE_SHIFT;
1220 			memory_exception_data.failure.NotPresent =
1221 				info->prot_valid ? 1 : 0;
1222 			memory_exception_data.failure.NoExecute =
1223 				info->prot_exec ? 1 : 0;
1224 			memory_exception_data.failure.ReadOnly =
1225 				info->prot_write ? 1 : 0;
1226 			memory_exception_data.failure.imprecise = 0;
1227 		}
1228 	}
1229 
1230 	rcu_read_lock();
1231 
1232 	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
1233 	idr_for_each_entry_continue(&p->event_idr, ev, id)
1234 		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
1235 			spin_lock(&ev->lock);
1236 			ev->memory_exception_data = data ? *data :
1237 							memory_exception_data;
1238 			set_event(ev);
1239 			spin_unlock(&ev->lock);
1240 		}
1241 
1242 	rcu_read_unlock();
1243 }
1244 
1245 void kfd_signal_reset_event(struct kfd_node *dev)
1246 {
1247 	struct kfd_hsa_hw_exception_data hw_exception_data;
1248 	struct kfd_hsa_memory_exception_data memory_exception_data;
1249 	struct kfd_process *p;
1250 	struct kfd_event *ev;
1251 	unsigned int temp;
1252 	uint32_t id, idx;
1253 	int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
1254 			KFD_HW_EXCEPTION_ECC :
1255 			KFD_HW_EXCEPTION_GPU_HANG;
1256 
1257 	/* Whole gpu reset caused by GPU hang and memory is lost */
1258 	memset(&hw_exception_data, 0, sizeof(hw_exception_data));
1259 	hw_exception_data.memory_lost = 1;
1260 	hw_exception_data.reset_cause = reset_cause;
1261 
1262 	memset(&memory_exception_data, 0, sizeof(memory_exception_data));
1263 	memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
1264 	memory_exception_data.failure.imprecise = true;
1265 
1266 	idx = srcu_read_lock(&kfd_processes_srcu);
1267 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1268 		int user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
1269 		struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p);
1270 
1271 		if (unlikely(user_gpu_id == -EINVAL)) {
1272 			WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
1273 			continue;
1274 		}
1275 
1276 		if (unlikely(!pdd)) {
1277 			WARN_ONCE(1, "Could not get device data from process pid:%d\n",
1278 				  p->lead_thread->pid);
1279 			continue;
1280 		}
1281 
1282 		if (dev->dqm->detect_hang_count && !pdd->has_reset_queue)
1283 			continue;
1284 
1285 		if (dev->dqm->detect_hang_count) {
1286 			struct amdgpu_task_info *ti;
1287 			struct amdgpu_fpriv *drv_priv;
1288 
1289 			if (unlikely(amdgpu_file_to_fpriv(pdd->drm_file, &drv_priv))) {
1290 				WARN_ONCE(1, "Could not get vm for device %x from pid:%d\n",
1291 					  dev->id, p->lead_thread->pid);
1292 				continue;
1293 			}
1294 
1295 			ti = amdgpu_vm_get_task_info_vm(&drv_priv->vm);
1296 			if (ti) {
1297 				dev_err(dev->adev->dev,
1298 					"Queues reset on process %s tid %d thread %s pid %d\n",
1299 					ti->process_name, ti->tgid, ti->task.comm, ti->task.pid);
1300 				amdgpu_vm_put_task_info(ti);
1301 			}
1302 		}
1303 
1304 		rcu_read_lock();
1305 
1306 		id = KFD_FIRST_NONSIGNAL_EVENT_ID;
1307 		idr_for_each_entry_continue(&p->event_idr, ev, id) {
1308 			if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
1309 				spin_lock(&ev->lock);
1310 				ev->hw_exception_data = hw_exception_data;
1311 				ev->hw_exception_data.gpu_id = user_gpu_id;
1312 				set_event(ev);
1313 				spin_unlock(&ev->lock);
1314 			}
1315 			if (ev->type == KFD_EVENT_TYPE_MEMORY &&
1316 			    reset_cause == KFD_HW_EXCEPTION_ECC) {
1317 				spin_lock(&ev->lock);
1318 				ev->memory_exception_data = memory_exception_data;
1319 				ev->memory_exception_data.gpu_id = user_gpu_id;
1320 				set_event(ev);
1321 				spin_unlock(&ev->lock);
1322 			}
1323 		}
1324 
1325 		rcu_read_unlock();
1326 	}
1327 	srcu_read_unlock(&kfd_processes_srcu, idx);
1328 }
1329 
1330 void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
1331 {
1332 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
1333 	struct kfd_hsa_memory_exception_data memory_exception_data;
1334 	struct kfd_hsa_hw_exception_data hw_exception_data;
1335 	struct kfd_event *ev;
1336 	uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;
1337 	int user_gpu_id;
1338 
1339 	if (!p) {
1340 		dev_warn(dev->adev->dev, "No process found with pasid:%d\n", pasid);
1341 		return; /* Presumably process exited. */
1342 	}
1343 
1344 	user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
1345 	if (unlikely(user_gpu_id == -EINVAL)) {
1346 		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
1347 		kfd_unref_process(p);
1348 		return;
1349 	}
1350 
1351 	memset(&hw_exception_data, 0, sizeof(hw_exception_data));
1352 	hw_exception_data.gpu_id = user_gpu_id;
1353 	hw_exception_data.memory_lost = 1;
1354 	hw_exception_data.reset_cause = KFD_HW_EXCEPTION_ECC;
1355 
1356 	memset(&memory_exception_data, 0, sizeof(memory_exception_data));
1357 	memory_exception_data.ErrorType = KFD_MEM_ERR_POISON_CONSUMED;
1358 	memory_exception_data.gpu_id = user_gpu_id;
1359 	memory_exception_data.failure.imprecise = true;
1360 
1361 	rcu_read_lock();
1362 
1363 	idr_for_each_entry_continue(&p->event_idr, ev, id) {
1364 		if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
1365 			spin_lock(&ev->lock);
1366 			ev->hw_exception_data = hw_exception_data;
1367 			set_event(ev);
1368 			spin_unlock(&ev->lock);
1369 		}
1370 
1371 		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
1372 			spin_lock(&ev->lock);
1373 			ev->memory_exception_data = memory_exception_data;
1374 			set_event(ev);
1375 			spin_unlock(&ev->lock);
1376 		}
1377 	}
1378 
1379 	dev_warn(dev->adev->dev, "Send SIGBUS to process %s(pasid:%d)\n",
1380 		p->lead_thread->comm, pasid);
1381 	rcu_read_unlock();
1382 
1383 	/* user application will handle SIGBUS signal */
1384 	send_sig(SIGBUS, p->lead_thread, 0);
1385 
1386 	kfd_unref_process(p);
1387 }
1388 
1389 /* signal KFD_EVENT_TYPE_SIGNAL events from process p
1390  * send signal SIGBUS to correspondent user space process
1391  * send signal SIGBUS to the corresponding user space process
1392 void kfd_signal_process_terminate_event(struct kfd_process *p)
1393 {
1394 	struct kfd_event *ev;
1395 	u32 id;
1396 
1397 	rcu_read_lock();
1398 
1399 	/* iterate from id 1 for KFD_EVENT_TYPE_SIGNAL events */
1400 	id = 1;
1401 	idr_for_each_entry_continue(&p->event_idr, ev, id)
1402 		if (ev->type == KFD_EVENT_TYPE_SIGNAL) {
1403 			spin_lock(&ev->lock);
1404 			set_event(ev);
1405 			spin_unlock(&ev->lock);
1406 		}
1407 
1408 	/* Send SIGBUS to p->lead_thread */
1409 	dev_notice(kfd_device,
1410 		   "Sending SIGBUS to process %d",
1411 		   p->lead_thread->pid);
1412 
1413 	send_sig(SIGBUS, p->lead_thread, 0);
1414 
1415 	rcu_read_unlock();
1416 }
1417