// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN HSM irqfd: use eventfd objects to inject virtual interrupts
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Shuo Liu <shuo.a.liu@intel.com>
 *	Yakui Zhao <yakui.zhao@intel.com>
 */

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include "acrn_drv.h"

static LIST_HEAD(acrn_irqfd_clients);

/**
 * struct hsm_irqfd - Properties of HSM irqfd
 * @vm:		Associated VM pointer
 * @wait:	Entry of wait-queue
 * @shutdown:	Async shutdown work
 * @eventfd:	Associated eventfd
 * @list:	Entry within &acrn_vm.irqfds of irqfds of a VM
 * @pt:		Structure for select/poll on the associated eventfd
 * @msi:	MSI data
 */
struct hsm_irqfd {
	struct acrn_vm		*vm;
	wait_queue_entry_t	wait;
	struct work_struct	shutdown;
	struct eventfd_ctx	*eventfd;
	struct list_head	list;
	poll_table		pt;
	struct acrn_msi_entry	msi;
};

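/* Inject the MSI carried by @irqfd into its associated VM. */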
static void acrn_irqfd_inject(struct hsm_irqfd *irqfd)
{
	struct acrn_vm *vm = irqfd->vm;

	acrn_msi_inject(vm, irqfd->msi.msi_addr,
			irqfd->msi.msi_data);
}

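/*
 * Detach the irqfd from its eventfd and free it.  Must be called with
 * vm->irqfds_lock held (see the lockdep assertion below).
 */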
static void hsm_irqfd_shutdown(struct hsm_irqfd *irqfd)
{
	u64 cnt;

	lockdep_assert_held(&irqfd->vm->irqfds_lock);

	/* remove from wait queue */
	list_del_init(&irqfd->list);
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

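/*
 * Deferred shutdown, run from the VM's irqfd workqueue.  The list check
 * skips irqfds that a concurrent deassign or deinit already shut down.
 */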
static void hsm_irqfd_shutdown_work(struct work_struct *work)
{
	struct hsm_irqfd *irqfd;
	struct acrn_vm *vm;

	irqfd = container_of(work, struct hsm_irqfd, shutdown);
	vm = irqfd->vm;
	mutex_lock(&vm->irqfds_lock);
	if (!list_empty(&irqfd->list))
		hsm_irqfd_shutdown(irqfd);
	mutex_unlock(&vm->irqfds_lock);
}

/* Called with wqh->lock held and interrupts disabled */
static int hsm_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
			    int sync, void *key)
{
	unsigned long poll_bits = (unsigned long)key;
	struct hsm_irqfd *irqfd;
	struct acrn_vm *vm;

	irqfd = container_of(wait, struct hsm_irqfd, wait);
	vm = irqfd->vm;
	if (poll_bits & POLLIN)
		/* An event has been signaled, inject an interrupt */
		acrn_irqfd_inject(irqfd);

	if (poll_bits & POLLHUP)
		/* Defer shutdown to the workqueue; we cannot sleep here
		 * because wqh->lock is held */
		queue_work(vm->irqfd_wq, &irqfd->shutdown);

	return 0;
}

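/*
 * poll_table callback invoked via vfs_poll(); it registers the irqfd's wait
 * entry on the eventfd's wait queue so hsm_irqfd_wakeup() is called when the
 * eventfd is signaled.
 */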
static void hsm_irqfd_poll_func(struct file *file, wait_queue_head_t *wqh,
				poll_table *pt)
{
	struct hsm_irqfd *irqfd;

	irqfd = container_of(pt, struct hsm_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/*
 * Assign an eventfd to a VM and create a HSM irqfd associated with the
 * eventfd. The properties of the HSM irqfd are built from a &struct
 * acrn_irqfd.
 */
static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
{
	struct eventfd_ctx *eventfd = NULL;
	struct hsm_irqfd *irqfd, *tmp;
	__poll_t events;
	int ret = 0;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->vm = vm;
	memcpy(&irqfd->msi, &args->msi, sizeof(args->msi));
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);

	CLASS(fd, f)(args->fd);
	if (fd_empty(f)) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(fd_file(f));
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto out;
	}

	irqfd->eventfd = eventfd;

	/*
	 * Install custom wake-up handling to be notified whenever underlying
	 * eventfd is signaled.
	 */
	init_waitqueue_func_entry(&irqfd->wait, hsm_irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, hsm_irqfd_poll_func);

	mutex_lock(&vm->irqfds_lock);
	list_for_each_entry(tmp, &vm->irqfds, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		ret = -EBUSY;
		mutex_unlock(&vm->irqfds_lock);
		goto fail;
	}
	list_add_tail(&irqfd->list, &vm->irqfds);
	mutex_unlock(&vm->irqfds_lock);

	/* Check whether an event is already pending at this stage */
	events = vfs_poll(fd_file(f), &irqfd->pt);

	if (events & EPOLLIN)
		acrn_irqfd_inject(irqfd);

	return 0;
fail:
	eventfd_ctx_put(eventfd);
out:
	kfree(irqfd);
	return ret;
}

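/*
 * Remove the irqfd bound to the eventfd referenced by @args->fd from the VM,
 * if such a binding exists.
 */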
static int acrn_irqfd_deassign(struct acrn_vm *vm,
			       struct acrn_irqfd *args)
{
	struct hsm_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&vm->irqfds_lock);
	list_for_each_entry_safe(irqfd, tmp, &vm->irqfds, list) {
		if (irqfd->eventfd == eventfd) {
			hsm_irqfd_shutdown(irqfd);
			break;
		}
	}
	mutex_unlock(&vm->irqfds_lock);
	eventfd_ctx_put(eventfd);

	return 0;
}

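/*
 * acrn_irqfd_config() is the ioctl-level entry point: depending on
 * @args->flags it either binds an eventfd to a guest MSI or removes an
 * existing binding.
 *
 * Illustrative userspace usage (a sketch, not part of this file): a device
 * model is expected to drive this through the ACRN_IOCTL_IRQFD ioctl of the
 * HSM UAPI, roughly as below.  "hsm_fd" is assumed to be the open HSM device
 * descriptor with the VM already created, and addr/data are placeholders for
 * the guest MSI address and data.
 *
 *	int evt = eventfd(0, 0);
 *	struct acrn_irqfd irqfd = {
 *		.fd	= evt,
 *		.flags	= 0,
 *		.msi	= { .msi_addr = addr, .msi_data = data },
 *	};
 *	ioctl(hsm_fd, ACRN_IOCTL_IRQFD, &irqfd);
 *
 * After this, writing to the eventfd injects the MSI into the VM; calling
 * the ioctl again with ACRN_IRQFD_FLAG_DEASSIGN set in .flags removes the
 * binding.
 */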
int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args)
{
	int ret;

	if (args->flags & ACRN_IRQFD_FLAG_DEASSIGN)
		ret = acrn_irqfd_deassign(vm, args);
	else
		ret = acrn_irqfd_assign(vm, args);

	return ret;
}

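/*
 * Per-VM irqfd setup: initialize the irqfd list and its lock and create the
 * workqueue used for deferred irqfd shutdown.
 */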
int acrn_irqfd_init(struct acrn_vm *vm)
{
	INIT_LIST_HEAD(&vm->irqfds);
	mutex_init(&vm->irqfds_lock);
	vm->irqfd_wq = alloc_workqueue("acrn_irqfd-%u", 0, 0, vm->vmid);
	if (!vm->irqfd_wq)
		return -ENOMEM;

	dev_dbg(acrn_dev.this_device, "VM %u irqfd init.\n", vm->vmid);
	return 0;
}

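/*
 * Per-VM irqfd teardown: destroy the workqueue first so no shutdown work can
 * run concurrently, then release any irqfds still on the list.
 */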
void acrn_irqfd_deinit(struct acrn_vm *vm)
{
	struct hsm_irqfd *irqfd, *next;

	dev_dbg(acrn_dev.this_device, "VM %u irqfd deinit.\n", vm->vmid);
	destroy_workqueue(vm->irqfd_wq);
	mutex_lock(&vm->irqfds_lock);
	list_for_each_entry_safe(irqfd, next, &vm->irqfds, list)
		hsm_irqfd_shutdown(irqfd);
	mutex_unlock(&vm->irqfds_lock);
}