// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN HSM irqfd: use eventfd objects to inject virtual interrupts
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Shuo Liu <shuo.a.liu@intel.com>
 *	Yakui Zhao <yakui.zhao@intel.com>
 */

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include "acrn_drv.h"

/**
 * struct hsm_irqfd - Properties of HSM irqfd
 * @vm:		Associated VM pointer
 * @wait:	Entry of wait-queue
 * @shutdown:	Async shutdown work
 * @eventfd:	Associated eventfd
 * @list:	Entry within &acrn_vm.irqfds of irqfds of a VM
 * @pt:		Structure for select/poll on the associated eventfd
 * @msi:	MSI data
 */
struct hsm_irqfd {
	struct acrn_vm		*vm;
	wait_queue_entry_t	wait;
	struct work_struct	shutdown;
	struct eventfd_ctx	*eventfd;
	struct list_head	list;
	poll_table		pt;
	struct acrn_msi_entry	msi;
};

/* Inject the MSI recorded in @irqfd into its associated VM. */
static void acrn_irqfd_inject(struct hsm_irqfd *irqfd)
{
	struct acrn_vm *vm = irqfd->vm;

	acrn_msi_inject(vm, irqfd->msi.msi_addr,
			irqfd->msi.msi_data);
}

/*
 * Tear down one irqfd: unlink it from the VM's list, detach its wait-queue
 * entry from the eventfd, drop the eventfd reference and free the object.
 *
 * Caller must hold vm->irqfds_lock (asserted below); holding the lock is
 * what makes the list_del_init() safe against concurrent walkers.
 */
static void hsm_irqfd_shutdown(struct hsm_irqfd *irqfd)
{
	u64 cnt;

	lockdep_assert_held(&irqfd->vm->irqfds_lock);

	/* remove from wait queue */
	list_del_init(&irqfd->list);
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/*
 * Deferred shutdown, queued from the wakeup callback on POLLHUP (the
 * callback itself runs under wqh->lock and so cannot shut down inline).
 * The list_empty() check skips irqfds that a concurrent deassign/deinit
 * already shut down under the same lock.
 */
static void hsm_irqfd_shutdown_work(struct work_struct *work)
{
	struct hsm_irqfd *irqfd;
	struct acrn_vm *vm;

	irqfd = container_of(work, struct hsm_irqfd, shutdown);
	vm = irqfd->vm;
	mutex_lock(&vm->irqfds_lock);
	if (!list_empty(&irqfd->list))
		hsm_irqfd_shutdown(irqfd);
	mutex_unlock(&vm->irqfds_lock);
}

/* Called with wqh->lock held and interrupts disabled */
static int hsm_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
			    int sync, void *key)
{
	unsigned long poll_bits = (unsigned long)key;
	struct hsm_irqfd *irqfd;
	struct acrn_vm *vm;

	irqfd = container_of(wait, struct hsm_irqfd, wait);
	vm = irqfd->vm;
	/*
	 * NOTE(review): key carries poll flags; POLLIN/POLLHUP are used here
	 * while the poll path below uses EPOLLIN — presumably numerically
	 * identical on supported arches, but confirm before relying on it.
	 */
	if (poll_bits & POLLIN)
		/* An event has been signaled, inject an interrupt */
		acrn_irqfd_inject(irqfd);

	if (poll_bits & POLLHUP)
		/* Do shutdown work in thread to hold wqh->lock */
		queue_work(vm->irqfd_wq, &irqfd->shutdown);

	return 0;
}

/*
 * poll_table callback: register this irqfd's wait entry on the eventfd's
 * wait queue so hsm_irqfd_wakeup() fires on future signals.
 */
static void hsm_irqfd_poll_func(struct file *file, wait_queue_head_t *wqh,
				poll_table *pt)
{
	struct hsm_irqfd *irqfd;

	irqfd = container_of(pt, struct hsm_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/*
 * Assign an eventfd to a VM and create a HSM irqfd associated with the
 * eventfd. The properties of the HSM irqfd are built from a &struct
 * acrn_irqfd.
 *
 * Returns 0 on success, -EBADF for a bad fd, -EBUSY if the eventfd is
 * already assigned to this VM, or the error from eventfd_ctx_fileget().
 */
static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
{
	struct eventfd_ctx *eventfd = NULL;
	struct hsm_irqfd *irqfd, *tmp;
	__poll_t events;
	int ret = 0;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->vm = vm;
	memcpy(&irqfd->msi, &args->msi, sizeof(args->msi));
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);

	/* Scope-based fd reference; released automatically on all exits */
	CLASS(fd, f)(args->fd);
	if (fd_empty(f)) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(fd_file(f));
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto out;
	}

	irqfd->eventfd = eventfd;

	/*
	 * Install custom wake-up handling to be notified whenever underlying
	 * eventfd is signaled.
	 */
	init_waitqueue_func_entry(&irqfd->wait, hsm_irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, hsm_irqfd_poll_func);

	/* Reject a second irqfd on the same eventfd for this VM */
	mutex_lock(&vm->irqfds_lock);
	list_for_each_entry(tmp, &vm->irqfds, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		ret = -EBUSY;
		mutex_unlock(&vm->irqfds_lock);
		goto fail;
	}
	list_add_tail(&irqfd->list, &vm->irqfds);
	mutex_unlock(&vm->irqfds_lock);

	/* Check the pending event in this stage */
	events = vfs_poll(fd_file(f), &irqfd->pt);

	if (events & EPOLLIN)
		acrn_irqfd_inject(irqfd);

	return 0;
fail:
	eventfd_ctx_put(eventfd);
out:
	kfree(irqfd);
	return ret;
}

/*
 * Remove the irqfd bound to args->fd's eventfd from @vm, if one exists.
 * Always returns 0 for a valid eventfd — deassigning an eventfd that was
 * never assigned is not an error.
 */
static int acrn_irqfd_deassign(struct acrn_vm *vm,
			       struct acrn_irqfd *args)
{
	struct hsm_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&vm->irqfds_lock);
	list_for_each_entry_safe(irqfd, tmp, &vm->irqfds, list) {
		if (irqfd->eventfd == eventfd) {
			hsm_irqfd_shutdown(irqfd);
			break;
		}
	}
	mutex_unlock(&vm->irqfds_lock);
	eventfd_ctx_put(eventfd);

	return 0;
}

/* ioctl entry point: dispatch to assign or deassign based on args->flags. */
int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args)
{
	int ret;

	if (args->flags & ACRN_IRQFD_FLAG_DEASSIGN)
		ret = acrn_irqfd_deassign(vm, args);
	else
		ret = acrn_irqfd_assign(vm, args);

	return ret;
}

/*
 * Per-VM irqfd setup: initialize the list/lock and create the workqueue
 * used for deferred shutdown. Returns -ENOMEM if the workqueue cannot be
 * allocated.
 */
int acrn_irqfd_init(struct acrn_vm *vm)
{
	INIT_LIST_HEAD(&vm->irqfds);
	mutex_init(&vm->irqfds_lock);
	vm->irqfd_wq = alloc_workqueue("acrn_irqfd-%u", 0, 0, vm->vmid);
	if (!vm->irqfd_wq)
		return -ENOMEM;

	dev_dbg(acrn_dev.this_device, "VM %u irqfd init.\n", vm->vmid);
	return 0;
}

/*
 * Per-VM irqfd teardown. The workqueue is destroyed first (flushing any
 * pending shutdown work) before the remaining irqfds are shut down under
 * the lock.
 */
void acrn_irqfd_deinit(struct acrn_vm *vm)
{
	struct hsm_irqfd *irqfd, *next;

	dev_dbg(acrn_dev.this_device, "VM %u irqfd deinit.\n", vm->vmid);
	destroy_workqueue(vm->irqfd_wq);
	mutex_lock(&vm->irqfds_lock);
	list_for_each_entry_safe(irqfd, next, &vm->irqfds, list)
		hsm_irqfd_shutdown(irqfd);
	mutex_unlock(&vm->irqfds_lock);
}