/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include "kfd_iommu.h"
#include <linux/device.h>

/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event; /* Event to wait for */
	bool activated;		 /* Becomes true when event is signaled */
};

/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages and map them to the process VA.
 * Individual signal events use their event_id as slot index.
 */
struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	bool need_to_free_pages;
};

static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}
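/*
 * Illustration (descriptive note, not part of the original code): for an
 * event with event_id N, the kernel reads its slot as
 * page_slots(p->signal_page)[N], while the signaler writes the mapped
 * copy at p->signal_page->user_address + N. Any slot value other than
 * UNSIGNALED_EVENT_SLOT means the event has been signaled.
 */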
static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all events to unsignaled */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	page->need_to_free_pages = true;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);

	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}

static int allocate_event_notification_slot(struct kfd_process *p,
					    struct kfd_event *ev,
					    const int *restore_id)
{
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
		/* Oldest user mode expects 256 event slots */
		p->signal_mapped_size = 256*8;
	}

	if (restore_id) {
		id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
				GFP_KERNEL);
	} else {
		/*
		 * Compatibility with old user mode: Only use signal slots
		 * user mode has mapped, may be less than
		 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
		 * of the event limit without breaking user mode.
		 */
		id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
				GFP_KERNEL);
	}
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

	return 0;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}

/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p:    Pointer to struct kfd_process
 * @id:   ID to look up
 * @bits: Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * matching signaled event is found, returns NULL. In that case the
 * caller should assume that the partial ID is invalid and do an
 * exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred. As long as the number of
 * interrupts is correct, all signaled events will be seen by the
 * driver.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/* Fast path for the common case that @id is not a partial ID
	 * and we only need a single lookup.
	 */
	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			return NULL;

		return idr_find(&p->event_idr, id);
	}

	/* General case for partial IDs: Iterate over all matching IDs
	 * and find the first one that has signaled.
	 */
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;

		ev = idr_find(&p->event_idr, id);
	}

	return ev;
}
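/*
 * Worked example for the loop above (hypothetical numbers, for
 * illustration only): suppose an interrupt payload carries bits = 10
 * valid ID bits and KFD_SIGNAL_EVENT_LIMIT were 4096. A partial ID of 5
 * would make the loop probe slots 5, 1029, 2053 and 3077 (id, id + 1024,
 * id + 2048, id + 3072) and return the first one whose slot is signaled.
 */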
static int create_signal_event(struct file *devkfd, struct kfd_process *p,
				struct kfd_event *ev, const int *restore_id)
{
	int ret;

	if (p->signal_mapped_size &&
	    p->signal_event_count == p->signal_mapped_size / 8) {
		if (!p->signal_event_limit_reached) {
			pr_debug("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	ret = allocate_event_notification_slot(p, ev, restore_id);
	if (ret) {
		pr_warn("Signal event wasn't created: out of kernel memory\n");
		return ret;
	}

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

static int create_other_event(struct kfd_process *p, struct kfd_event *ev, const int *restore_id)
{
	int id;

	if (restore_id)
		id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
				GFP_KERNEL);
	else
		/* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
		 * intentional integer overflow to -1 without a compiler
		 * warning. idr_alloc treats a negative value as "maximum
		 * signed integer".
		 */
		id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
				(uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
				GFP_KERNEL);

	if (id < 0)
		return id;
	ev->event_id = id;

	return 0;
}
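/*
 * Summary of the event ID space, as implied by the two allocators above
 * (descriptive note): signal and debug events get IDs below
 * KFD_SIGNAL_EVENT_LIMIT so the ID doubles as a signal slot index, while
 * all other event types are allocated from
 * [KFD_FIRST_NONSIGNAL_EVENT_ID, KFD_LAST_NONSIGNAL_EVENT_ID].
 */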
void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters. They will return failure */
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->event = NULL;
	wake_up_all(&ev->wq);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		destroy_event(p, ev);
	idr_destroy(&p->event_idr);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		if (page->need_to_free_pages)
			free_pages((unsigned long)page->kernel_address,
				   get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
	       ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

static int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
			      uint64_t size, uint64_t user_handle)
{
	struct kfd_signal_page *page;

	if (p->signal_page)
		return -EBUSY;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* Initialize all events to unsignaled */
	memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = kernel_address;

	p->signal_page = page;
	p->signal_mapped_size = size;
	p->signal_handle = user_handle;
	return 0;
}
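/*
 * Descriptive note on the CRIU restore path below: the signal page is
 * not freshly allocated on restore. It is an existing user-mode BO whose
 * saved mmap offset encodes the GPU ID and a per-device IDR handle
 * (GET_GPU_ID/GET_IDR_HANDLE). The helper maps that BO into the kernel
 * and installs it as the process's signal page.
 */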
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset)
{
	struct kfd_dev *kfd;
	struct kfd_process_device *pdd;
	void *mem, *kern_addr;
	uint64_t size;
	int err = 0;

	if (p->signal_page) {
		pr_err("Event page is already set\n");
		return -EINVAL;
	}

	kfd = kfd_device_by_id(GET_GPU_ID(event_page_offset));
	if (!kfd) {
		pr_err("Getting device by id failed in %s\n", __func__);
		return -EINVAL;
	}

	pdd = kfd_bind_process_to_device(kfd, p);
	if (IS_ERR(pdd))
		return PTR_ERR(pdd);

	mem = kfd_process_device_translate_handle(pdd,
			GET_IDR_HANDLE(event_page_offset));
	if (!mem) {
		pr_err("Can't find BO, offset is 0x%llx\n", event_page_offset);
		return -EINVAL;
	}

	err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->adev,
					mem, &kern_addr, &size);
	if (err) {
		pr_err("Failed to map event page to kernel\n");
		return err;
	}

	err = kfd_event_page_set(p, kern_addr, size, event_page_offset);
	if (err) {
		pr_err("Failed to set event page\n");
		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->adev, mem);
		return err;
	}
	return err;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev, NULL);
		if (!ret) {
			*event_page_offset = KFD_MMAP_TYPE_EVENTS;
			*event_slot_index = ev->event_id;
		}
		break;
	default:
		ret = create_other_event(p, ev, NULL);
		break;
	}

	if (!ret) {
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}
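/*
 * Restore a single event from CRIU private data. Each record is a
 * struct kfd_criu_event_priv_data; *priv_data_offset tracks how far the
 * caller has consumed the user buffer and is advanced by one record on
 * success. Events are recreated with their original IDs by passing
 * restore_id into the create_*_event helpers above.
 */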
int kfd_criu_restore_event(struct file *devkfd,
			   struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	struct kfd_criu_event_priv_data *ev_priv;
	struct kfd_event *ev = NULL;
	int ret = 0;

	ev_priv = kmalloc(sizeof(*ev_priv), GFP_KERNEL);
	if (!ev_priv)
		return -ENOMEM;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		ret = -ENOMEM;
		goto exit;
	}

	if (*priv_data_offset + sizeof(*ev_priv) > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	ret = copy_from_user(ev_priv, user_priv_ptr + *priv_data_offset, sizeof(*ev_priv));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}
	*priv_data_offset += sizeof(*ev_priv);

	if (ev_priv->user_handle) {
		ret = kfd_kmap_event_page(p, ev_priv->user_handle);
		if (ret)
			goto exit;
	}

	ev->type = ev_priv->type;
	ev->auto_reset = ev_priv->auto_reset;
	ev->signaled = ev_priv->signaled;

	init_waitqueue_head(&ev->wq);

	mutex_lock(&p->event_mutex);
	switch (ev->type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev, &ev_priv->event_id);
		break;
	case KFD_EVENT_TYPE_MEMORY:
		memcpy(&ev->memory_exception_data,
			&ev_priv->memory_exception_data,
			sizeof(struct kfd_hsa_memory_exception_data));

		ret = create_other_event(p, ev, &ev_priv->event_id);
		break;
	case KFD_EVENT_TYPE_HW_EXCEPTION:
		memcpy(&ev->hw_exception_data,
			&ev_priv->hw_exception_data,
			sizeof(struct kfd_hsa_hw_exception_data));

		ret = create_other_event(p, ev, &ev_priv->event_id);
		break;
	}
	/* The early error paths above jump to exit without holding the
	 * mutex, so unlock here rather than after the label.
	 */
	mutex_unlock(&p->event_mutex);

exit:
	if (ret)
		kfree(ev);

	kfree(ev_priv);

	return ret;
}
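/*
 * Checkpoint counterpart of kfd_criu_restore_event: serializes every
 * event of the process into an array of fixed-size
 * struct kfd_criu_event_priv_data records, copies the array to the user
 * buffer and advances *priv_data_offset accordingly.
 */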
int kfd_criu_checkpoint_events(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset)
{
	struct kfd_criu_event_priv_data *ev_privs;
	int i = 0;
	int ret = 0;
	struct kfd_event *ev;
	uint32_t ev_id;

	uint32_t num_events = kfd_get_num_events(p);

	if (!num_events)
		return 0;

	ev_privs = kvzalloc(num_events * sizeof(*ev_privs), GFP_KERNEL);
	if (!ev_privs)
		return -ENOMEM;

	idr_for_each_entry(&p->event_idr, ev, ev_id) {
		struct kfd_criu_event_priv_data *ev_priv;

		/*
		 * Currently all events have the same size of private_data,
		 * but the current ioctls and the CRIU plugin support
		 * private_data of variable sizes
		 */
		ev_priv = &ev_privs[i];

		ev_priv->object_type = KFD_CRIU_OBJECT_TYPE_EVENT;

		/* We store the user_handle with the first event */
		if (i == 0 && p->signal_page)
			ev_priv->user_handle = p->signal_handle;

		ev_priv->event_id = ev->event_id;
		ev_priv->auto_reset = ev->auto_reset;
		ev_priv->type = ev->type;
		ev_priv->signaled = ev->signaled;

		if (ev_priv->type == KFD_EVENT_TYPE_MEMORY)
			memcpy(&ev_priv->memory_exception_data,
				&ev->memory_exception_data,
				sizeof(struct kfd_hsa_memory_exception_data));
		else if (ev_priv->type == KFD_EVENT_TYPE_HW_EXCEPTION)
			memcpy(&ev_priv->hw_exception_data,
				&ev->hw_exception_data,
				sizeof(struct kfd_hsa_hw_exception_data));

		pr_debug("Checkpointed event[%d] id = 0x%08x auto_reset = %x type = %x signaled = %x\n",
			  i,
			  ev_priv->event_id,
			  ev_priv->auto_reset,
			  ev_priv->type,
			  ev_priv->signaled);
		i++;
	}

	ret = copy_to_user(user_priv_data + *priv_data_offset,
			   ev_privs, num_events * sizeof(*ev_privs));
	if (ret) {
		pr_err("Failed to copy events priv to user\n");
		ret = -EFAULT;
	}

	*priv_data_offset += num_events * sizeof(*ev_privs);

	kvfree(ev_privs);
	return ret;
}

int kfd_get_num_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;
	u32 num_events = 0;

	idr_for_each_entry(&p->event_idr, ev, id)
		num_events++;

	return num_events;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the p->event_mutex, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->activated = true;

	wake_up_all(&ev->wq);
}

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev = NULL;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits)
		ev = lookup_signaled_event_by_partial_id(p, partial_id,
							 valid_id_bits);
	if (ev) {
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * Partial ID lookup failed. Assume that the event ID
		 * in the interrupt payload was invalid and do an
		 * exhaustive search of signaled events.
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		if (valid_id_bits)
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
					     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
			/* With relatively few events, it's faster to
			 * iterate over the event IDR
			 */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;

				if (slots[id] != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/* With relatively many events, it's faster to
			 * iterate over the signal slots and lookup
			 * only signaled events from the IDR.
			 */
			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		init_wait(&event_waiters[i].wait);
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

static int init_event_waiter_get_status(struct kfd_process *p,
					struct kfd_event_waiter *waiter,
					uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}
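/*
 * Waiter setup happens in two phases under p->event_mutex (see
 * kfd_wait_on_events): init_event_waiter_get_status() above latches the
 * current signaled state (consuming it for auto-reset events), and only
 * waiters that are not already activated are then added to the wait
 * queue by the helper below.
 */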
static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to
	 * wait on this event.
	 */
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
}

/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}

/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}
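/*
 * Timeout conversion, for reference: KFD_EVENT_TIMEOUT_IMMEDIATE maps to
 * 0 jiffies (pure poll), KFD_EVENT_TIMEOUT_INFINITE maps to
 * MAX_SCHEDULE_TIMEOUT, and any other value is clamped to 2^31 - 1 ms
 * before conversion; the +1 below appears to round up so the wait does
 * not end short of the requested time.
 */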
static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event)
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);

	kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
				event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	/* Add to wait lists if we need to wait.
	 */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned long pfn;
	struct kfd_signal_page *page;
	int ret;

	/* check required size doesn't exceed the allocated size */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* mapping the page to user process */
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!ret)
		p->signal_mapped_size = vma->vm_end - vma->vm_start;

	return ret;
}
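/*
 * Note on the mapping above: signal_mapped_size records how much of the
 * signal page user mode actually mapped. allocate_event_notification_slot()
 * only hands out signal slots within that mapped range, which is what
 * keeps older user mode (mapping fewer slots) working.
 */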
/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	uint32_t id;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	if (type == KFD_EVENT_TYPE_MEMORY) {
		dev_warn(kfd_device,
			"Sending SIGSEGV to process %d (pasid 0x%x)",
				p->lead_thread->pid, p->pasid);
		send_sig(SIGSEGV, p->lead_thread, 0);
	}

	/* Send SIGTERM if no event of type "type" has been found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to process %d (pasid 0x%x)",
					p->lead_thread->pid, p->pasid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"Process %d (pasid 0x%x) got unhandled exception",
				p->lead_thread->pid, p->pasid);
		}
	}
}

#ifdef KFD_SUPPORT_IOMMU_V2
void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */
	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		kfd_unref_process(p);
		return; /* Process is exiting */
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	mmap_read_lock(mm);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma && address >= vma->vm_start) {
		memory_exception_data.failure.NotPresent = 0;

		if (is_write_requested && !(vma->vm_flags & VM_WRITE))
			memory_exception_data.failure.ReadOnly = 1;
		else
			memory_exception_data.failure.ReadOnly = 0;

		if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
			memory_exception_data.failure.NoExecute = 1;
		else
			memory_exception_data.failure.NoExecute = 0;
	}

	mmap_read_unlock(mm);
	mmput(mm);

	pr_debug("notpresent %d, noexecute %d, readonly %d\n",
			memory_exception_data.failure.NotPresent,
			memory_exception_data.failure.NoExecute,
			memory_exception_data.failure.ReadOnly);

	/* Workaround on Raven to not kill the process when memory is freed
	 * before IOMMU is able to finish processing all the excessive PPRs
	 */

	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) &&
	    KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) &&
	    KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0)) {
		mutex_lock(&p->event_mutex);

		/* Lookup events by type and signal them */
		lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
				&memory_exception_data);

		mutex_unlock(&p->event_mutex);
	}

	kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */

void kfd_signal_hw_exception_event(u32 pasid)
{
1171c7b6bac9SFenghua Yu void kfd_signal_hw_exception_event(u32 pasid)
1172930c5ff4SAlexey Skidanov {
1173930c5ff4SAlexey Skidanov /*
1174930c5ff4SAlexey Skidanov * Because we are called from arbitrary context (workqueue) as opposed
1175930c5ff4SAlexey Skidanov * to process context, kfd_process could attempt to exit while we are
1176abb208a8SFelix Kuehling * running so the lookup function increments the process ref count.
1177930c5ff4SAlexey Skidanov */
1178930c5ff4SAlexey Skidanov struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
1179930c5ff4SAlexey Skidanov
1180930c5ff4SAlexey Skidanov if (!p)
1181930c5ff4SAlexey Skidanov return; /* Presumably process exited. */
1182930c5ff4SAlexey Skidanov
1183930c5ff4SAlexey Skidanov mutex_lock(&p->event_mutex);
1184930c5ff4SAlexey Skidanov
1185930c5ff4SAlexey Skidanov /* Look up events by type and signal them */
1186930c5ff4SAlexey Skidanov lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
1187930c5ff4SAlexey Skidanov
1188930c5ff4SAlexey Skidanov mutex_unlock(&p->event_mutex);
1189abb208a8SFelix Kuehling kfd_unref_process(p);
1190930c5ff4SAlexey Skidanov }
11912640c3faSshaoyunl
1192c7b6bac9SFenghua Yu void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
11932640c3faSshaoyunl struct kfd_vm_fault_info *info)
11942640c3faSshaoyunl {
11952640c3faSshaoyunl struct kfd_event *ev;
11962640c3faSshaoyunl uint32_t id;
11972640c3faSshaoyunl struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
11982640c3faSshaoyunl struct kfd_hsa_memory_exception_data memory_exception_data;
11992640c3faSshaoyunl
12002640c3faSshaoyunl if (!p)
12012640c3faSshaoyunl return; /* Presumably process exited. */
12022640c3faSshaoyunl memset(&memory_exception_data, 0, sizeof(memory_exception_data));
12032640c3faSshaoyunl memory_exception_data.gpu_id = dev->id;
12040d87c9cfSKent Russell memory_exception_data.failure.imprecise = true;
12052640c3faSshaoyunl /* Set failure reason */
12062640c3faSshaoyunl if (info) {
12072640c3faSshaoyunl memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
12082640c3faSshaoyunl memory_exception_data.failure.NotPresent =
12092640c3faSshaoyunl info->prot_valid ? 1 : 0;
12102640c3faSshaoyunl memory_exception_data.failure.NoExecute =
12112640c3faSshaoyunl info->prot_exec ? 1 : 0;
12122640c3faSshaoyunl memory_exception_data.failure.ReadOnly =
12132640c3faSshaoyunl info->prot_write ? 1 : 0;
12142640c3faSshaoyunl memory_exception_data.failure.imprecise = 0;
12152640c3faSshaoyunl }
12162640c3faSshaoyunl mutex_lock(&p->event_mutex);
12172640c3faSshaoyunl
12182640c3faSshaoyunl id = KFD_FIRST_NONSIGNAL_EVENT_ID;
12192640c3faSshaoyunl idr_for_each_entry_continue(&p->event_idr, ev, id)
12202640c3faSshaoyunl if (ev->type == KFD_EVENT_TYPE_MEMORY) {
12212640c3faSshaoyunl ev->memory_exception_data = memory_exception_data;
12222640c3faSshaoyunl set_event(ev);
12232640c3faSshaoyunl }
12242640c3faSshaoyunl
12252640c3faSshaoyunl mutex_unlock(&p->event_mutex);
12262640c3faSshaoyunl kfd_unref_process(p);
12272640c3faSshaoyunl }
1228e42051d2SShaoyun Liu
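/*
 * A whole-GPU reset affects every process with events on this device, so
 * unlike the PASID-based signal paths above, all processes are walked
 * here. The SRCU read lock keeps the iteration safe against processes
 * being added or removed concurrently.
 */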
1229e42051d2SShaoyun Liu void kfd_signal_reset_event(struct kfd_dev *dev)
1230e42051d2SShaoyun Liu {
1231e42051d2SShaoyun Liu struct kfd_hsa_hw_exception_data hw_exception_data;
12329b54d201SEric Huang struct kfd_hsa_memory_exception_data memory_exception_data;
1233e42051d2SShaoyun Liu struct kfd_process *p;
1234e42051d2SShaoyun Liu struct kfd_event *ev;
1235e42051d2SShaoyun Liu unsigned int temp;
1236e42051d2SShaoyun Liu uint32_t id, idx;
12379b54d201SEric Huang int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
12389b54d201SEric Huang KFD_HW_EXCEPTION_ECC :
12399b54d201SEric Huang KFD_HW_EXCEPTION_GPU_HANG;
1240e42051d2SShaoyun Liu
1241e42051d2SShaoyun Liu /* Whole GPU reset caused by GPU hang; memory is lost */
1242e42051d2SShaoyun Liu memset(&hw_exception_data, 0, sizeof(hw_exception_data));
1243e42051d2SShaoyun Liu hw_exception_data.gpu_id = dev->id;
1244e42051d2SShaoyun Liu hw_exception_data.memory_lost = 1;
12459b54d201SEric Huang hw_exception_data.reset_cause = reset_cause;
12469b54d201SEric Huang
12479b54d201SEric Huang memset(&memory_exception_data, 0, sizeof(memory_exception_data));
12489b54d201SEric Huang memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
12499b54d201SEric Huang memory_exception_data.gpu_id = dev->id;
12509b54d201SEric Huang memory_exception_data.failure.imprecise = true;
1251e42051d2SShaoyun Liu
1252e42051d2SShaoyun Liu idx = srcu_read_lock(&kfd_processes_srcu);
1253e42051d2SShaoyun Liu hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1254e42051d2SShaoyun Liu mutex_lock(&p->event_mutex);
1255e42051d2SShaoyun Liu id = KFD_FIRST_NONSIGNAL_EVENT_ID;
12569b54d201SEric Huang idr_for_each_entry_continue(&p->event_idr, ev, id) {
1257e42051d2SShaoyun Liu if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
1258e42051d2SShaoyun Liu ev->hw_exception_data = hw_exception_data;
1259e42051d2SShaoyun Liu set_event(ev);
1260e42051d2SShaoyun Liu }
12619b54d201SEric Huang if (ev->type == KFD_EVENT_TYPE_MEMORY &&
12629b54d201SEric Huang reset_cause == KFD_HW_EXCEPTION_ECC) {
12639b54d201SEric Huang ev->memory_exception_data = memory_exception_data;
12649b54d201SEric Huang set_event(ev);
12659b54d201SEric Huang }
12669b54d201SEric Huang }
1267e42051d2SShaoyun Liu mutex_unlock(&p->event_mutex);
1268e42051d2SShaoyun Liu }
1269e42051d2SShaoyun Liu srcu_read_unlock(&kfd_processes_srcu, idx);
1270e42051d2SShaoyun Liu }
1271e2b1f9f5SDennis Li
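/*
 * RAS poison consumption: mark both HW-exception and memory events for
 * the affected process, then deliver SIGBUS so that recovery policy is
 * left to the user application.
 */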
1272e2b1f9f5SDennis Li void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid)
1273e2b1f9f5SDennis Li {
1274e2b1f9f5SDennis Li struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
1275e2b1f9f5SDennis Li struct kfd_hsa_memory_exception_data memory_exception_data;
1276e2b1f9f5SDennis Li struct kfd_hsa_hw_exception_data hw_exception_data;
1277e2b1f9f5SDennis Li struct kfd_event *ev;
1278e2b1f9f5SDennis Li uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;
1279e2b1f9f5SDennis Li
1280e2b1f9f5SDennis Li if (!p)
1281e2b1f9f5SDennis Li return; /* Presumably process exited. */
1282e2b1f9f5SDennis Li
1283e2b1f9f5SDennis Li memset(&hw_exception_data, 0, sizeof(hw_exception_data));
1284e2b1f9f5SDennis Li hw_exception_data.gpu_id = dev->id;
1285e2b1f9f5SDennis Li hw_exception_data.memory_lost = 1;
1286e2b1f9f5SDennis Li hw_exception_data.reset_cause = KFD_HW_EXCEPTION_ECC;
1287e2b1f9f5SDennis Li
1288e2b1f9f5SDennis Li memset(&memory_exception_data, 0, sizeof(memory_exception_data));
1289e2b1f9f5SDennis Li memory_exception_data.ErrorType = KFD_MEM_ERR_POISON_CONSUMED;
1290e2b1f9f5SDennis Li memory_exception_data.gpu_id = dev->id;
1291e2b1f9f5SDennis Li memory_exception_data.failure.imprecise = true;
1292e2b1f9f5SDennis Li
1293e2b1f9f5SDennis Li mutex_lock(&p->event_mutex);
1294e2b1f9f5SDennis Li idr_for_each_entry_continue(&p->event_idr, ev, id) {
1295e2b1f9f5SDennis Li if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
1296e2b1f9f5SDennis Li ev->hw_exception_data = hw_exception_data;
1297e2b1f9f5SDennis Li set_event(ev);
1298e2b1f9f5SDennis Li }
1299e2b1f9f5SDennis Li
1300e2b1f9f5SDennis Li if (ev->type == KFD_EVENT_TYPE_MEMORY) {
1301e2b1f9f5SDennis Li ev->memory_exception_data = memory_exception_data;
1302e2b1f9f5SDennis Li set_event(ev);
1303e2b1f9f5SDennis Li }
1304e2b1f9f5SDennis Li }
1305e2b1f9f5SDennis Li mutex_unlock(&p->event_mutex);
1306e2b1f9f5SDennis Li
1307e2b1f9f5SDennis Li /* user application will handle SIGBUS signal */
1308e2b1f9f5SDennis Li send_sig(SIGBUS, p->lead_thread, 0);
130996b62c8aSDennis Li
131096b62c8aSDennis Li kfd_unref_process(p);
1311e2b1f9f5SDennis Li }
1312