// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * KFD Interrupts.
 *
 * AMD GPUs deliver interrupts by pushing an interrupt description onto the
 * interrupt ring and then sending an interrupt. KGD receives the interrupt
 * in ISR and sends us a pointer to each new entry on the interrupt ring.
 *
 * We generally can't process interrupt-signaled events from ISR, so we call
 * out to each interrupt client module (currently only the scheduler) to ask if
 * each interrupt is interesting. If they return true, then it requires further
 * processing so we copy it to an internal interrupt ring and call each
 * interrupt client again from a work-queue.
 *
 * There's no acknowledgment for the interrupts we use. The hardware simply
 * queues a new interrupt each time without waiting.
 *
 * The fixed-size internal queue means that it's possible for us to lose
 * interrupts because we have no back-pressure to the hardware.
 */

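/*
 * A reading aid for the flow implemented below (the ISR-side caller lives
 * in KGD, outside this file):
 *
 *   ISR path:    interrupt_is_wanted() filters each raw ring entry and
 *                enqueue_ih_ring_entry() copies wanted entries into ih_fifo;
 *                interrupt_work is then queued on ih_wq.
 *   Worker path: interrupt_wq() drains ih_fifo via dequeue_ih_ring_entry()
 *                and passes each entry to the event_interrupt_class client.
 */
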
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kfifo.h>
#include "kfd_priv.h"

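/* Capacity of each node's internal fifo, in units of ih_ring_entry_size */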
#define KFD_IH_NUM_ENTRIES 16384

static void interrupt_wq(struct work_struct *);

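/**
 * kfd_interrupt_init - set up software interrupt handling for a KFD node
 * @node: node whose interrupt state is being initialized
 *
 * Allocates the node's internal ih_fifo, creates the KFD IH workqueue on
 * first use (it lives on the device and is shared by all of its nodes),
 * and marks interrupts active. The final write barrier pairs with the
 * interrupt being enabled after this function returns.
 *
 * Return: 0 on success, negative errno on allocation failure.
 */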
int kfd_interrupt_init(struct kfd_node *node)
{
	int r;

	r = kfifo_alloc(&node->ih_fifo,
			KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size,
			GFP_KERNEL);
	if (r) {
		dev_err(node->adev->dev, "Failed to allocate IH fifo\n");
		return r;
	}

	if (!node->kfd->ih_wq) {
		node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,
						   node->kfd->num_nodes);
		if (unlikely(!node->kfd->ih_wq)) {
			kfifo_free(&node->ih_fifo);
			dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
			return -ENOMEM;
		}
	}
	spin_lock_init(&node->interrupt_lock);

	INIT_WORK(&node->interrupt_work, interrupt_wq);

	node->interrupts_active = true;

	/*
	 * After this function returns, the interrupt will be enabled. This
	 * barrier ensures that the interrupt running on a different processor
	 * sees all the above writes.
	 */
	smp_wmb();

	return 0;
}

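/**
 * kfd_interrupt_exit - tear down software interrupt handling for a KFD node
 * @node: node whose interrupt state is being torn down
 *
 * Clears interrupts_active under interrupt_lock so that the ISR stops
 * feeding ih_fifo, then frees the fifo. The shared workqueue is not
 * destroyed here.
 */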
void kfd_interrupt_exit(struct kfd_node *node)
{
	/*
	 * Stop the interrupt handler from writing to the ring and scheduling
	 * workqueue items. The spinlock ensures that any interrupt running
	 * after we have unlocked sees interrupts_active = false.
	 */
	unsigned long flags;

	spin_lock_irqsave(&node->interrupt_lock, flags);
	node->interrupts_active = false;
	spin_unlock_irqrestore(&node->interrupt_lock, flags);
	kfifo_free(&node->ih_fifo);
}

/*
 * Assumption: single reader/writer. This function is not re-entrant.
 * The ISR is the only writer into ih_fifo; interrupt_wq() is the only reader.
 */
bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry)
{
	if (kfifo_is_full(&node->ih_fifo)) {
		dev_warn_ratelimited(node->adev->dev, "KFD node %d ih_fifo overflow\n",
				     node->node_id);
		return false;
	}

	kfifo_in(&node->ih_fifo, ih_ring_entry, node->kfd->device_info.ih_ring_entry_size);
	return true;
}

/*
 * Assumption: single reader/writer. This function is not re-entrant.
 * Returns a pointer into the fifo rather than a copy; the caller must
 * kfifo_skip_count() past the entry once it is done with it.
 */
static bool dequeue_ih_ring_entry(struct kfd_node *node, u32 **ih_ring_entry)
{
	int count;

	if (kfifo_is_empty(&node->ih_fifo))
		return false;

	count = kfifo_out_linear_ptr(&node->ih_fifo, ih_ring_entry,
				     node->kfd->device_info.ih_ring_entry_size);
	WARN_ON(count != node->kfd->device_info.ih_ring_entry_size);
	return count == node->kfd->device_info.ih_ring_entry_size;
}

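/**
 * interrupt_wq - bottom-half worker that drains the internal fifo
 * @work: work item embedded in the owning kfd_node
 *
 * Hands each queued ring entry to the node's event_interrupt_class and
 * then skips past it in the fifo. If processing exceeds one second, the
 * worker requeues itself and returns to avoid soft-lockup warnings.
 */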
static void interrupt_wq(struct work_struct *work)
{
	struct kfd_node *dev = container_of(work, struct kfd_node, interrupt_work);
	uint32_t *ih_ring_entry;
	unsigned long start_jiffies = jiffies;

	while (dequeue_ih_ring_entry(dev, &ih_ring_entry)) {
		dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,
									  ih_ring_entry);
		kfifo_skip_count(&dev->ih_fifo, dev->kfd->device_info.ih_ring_entry_size);

		if (time_is_before_jiffies(start_jiffies + HZ)) {
			/* If we spent more than a second processing signals,
			 * reschedule the worker to avoid soft-lockup warnings
			 */
			queue_work(dev->kfd->ih_wq, &dev->interrupt_work);
			break;
		}
	}
}

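/**
 * interrupt_is_wanted - ISR-side filter for a raw interrupt ring entry
 * @dev: node that received the interrupt
 * @ih_ring_entry: entry as delivered on the hardware interrupt ring
 * @patched_ihre: buffer the client may fill with a rewritten entry
 * @flag: set by the client when the patched entry should be used
 *
 * Asks the interrupt client's interrupt_isr() callback whether this entry
 * needs bottom-half processing. Returns true if it does.
 */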
bool interrupt_is_wanted(struct kfd_node *dev,
			 const uint32_t *ih_ring_entry,
			 uint32_t *patched_ihre, bool *flag)
{
	/* integer and bitwise OR so there is no boolean short-circuiting */
	unsigned int wanted = 0;

	wanted |= dev->kfd->device_info.event_interrupt_class->interrupt_isr(dev,
						ih_ring_entry, patched_ihre, flag);

	return wanted != 0;
}