/*-
 * SPDX-License-Identifier: MIT OR GPL-2.0-only
 *
 * Copyright © 2015 Julien Grall
 * Copyright © 2013 Spectra Logic Corporation
 * Copyright © 2018 John Baldwin/The FreeBSD Foundation
 * Copyright © 2019 Roger Pau Monné/Citrix Systems R&D
 * Copyright © 2021 Elliott Mitchell
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/stddef.h>

#include <xen/xen-os.h>
#include <xen/xen_intr.h>
#include <machine/xen/arch-intr.h>

#include <x86/apicvar.h>

/************************ Xen x86 interrupt interface ************************/

/*
 * Pointers to the interrupt counters
 */
DPCPU_DEFINE_STATIC(u_long *, pintrcnt);

static void
xen_intrcnt_init(void *dummy __unused)
{
	unsigned int i;

	if (!xen_domain())
		return;

	CPU_FOREACH(i) {
		char buf[MAXCOMLEN + 1];

		snprintf(buf, sizeof(buf), "cpu%d:xen", i);
		intrcnt_add(buf, DPCPU_ID_PTR(i, pintrcnt));
	}
}
SYSINIT(xen_intrcnt_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intrcnt_init, NULL);

/*
 * Transition from assembly language, called from
 * sys/{amd64/amd64|i386/i386}/apic_vector.S
 */
extern void xen_arch_intr_handle_upcall(struct trapframe *);
void
xen_arch_intr_handle_upcall(struct trapframe *trap_frame)
{
	struct trapframe *old;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU
	 */
	critical_enter();

	++*DPCPU_GET(pintrcnt);

	++curthread->td_intr_nesting_level;
	old = curthread->td_intr_frame;
	curthread->td_intr_frame = trap_frame;

	xen_intr_handle_upcall(NULL);

	curthread->td_intr_frame = old;
	--curthread->td_intr_nesting_level;

	if (xen_evtchn_needs_ack)
		lapic_eoi();

	critical_exit();
}

/******************************** EVTCHN PIC *********************************/

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
/*
 * Lock for x86-related structures.  Notably, modifying
 * xen_intr_auto_vector_count and allocating interrupts both require this
 * lock to be held.
 */
static struct mtx xen_intr_x86_lock;

static u_int first_evtchn_irq;

static u_int xen_intr_auto_vector_count;

/*
 * List of released isrcs.  This is meant to overlay struct xenisrc, with
 * only the xen_arch_isrc_t portion being preserved; everything else can
 * be wiped.
 */
struct avail_list {
	xen_arch_isrc_t preserve;
	SLIST_ENTRY(avail_list) free;
};
static SLIST_HEAD(free, avail_list) avail_list =
    SLIST_HEAD_INITIALIZER(avail_list);

/*
 * Reserve a contiguous range of interrupt vectors, one per event channel,
 * immediately after the I/O IRQs.
 */
void
xen_intr_alloc_irqs(void)
{

	if (num_io_irqs > UINT_MAX - NR_EVENT_CHANNELS)
		panic("IRQ allocation overflow (num_msi_irqs too high?)");
	first_evtchn_irq = num_io_irqs;
	num_io_irqs += NR_EVENT_CHANNELS;
}

static void
xen_intr_pic_enable_source(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_enable_source((struct xenisrc *)isrc);
}

/*
 * Mask an interrupt source, optionally performing any required
 * end-of-interrupt acknowledgement.
 *
 * \param isrc The interrupt source to disable.
 * \param eoi  Whether an EOI should also be performed (ignored; event
 *             channels require no EOI).
 */
static void
xen_intr_pic_disable_source(struct intsrc *isrc, int eoi)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_disable_source((struct xenisrc *)isrc);
}

static void
xen_intr_pic_eoi_source(struct intsrc *isrc)
{

	/* Nothing to do on end-of-interrupt */
}

static void
xen_intr_pic_enable_intr(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_enable_intr((struct xenisrc *)isrc);
}

static void
xen_intr_pic_disable_intr(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_disable_intr((struct xenisrc *)isrc);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc The interrupt source to query.
 *
 * \return The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_pic_vector(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");

	return (((struct xenisrc *)isrc)->xi_arch.vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc The interrupt source to query.
 *
 * \returns 0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_pic_source_pending(struct intsrc *isrc)
{
	/*
	 * EventChannels are edge triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}

/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_pic_suspend(struct pic *pic)
{

	/* Nothing to do on suspend */
}

static void
xen_intr_pic_resume(struct pic *pic, bool suspend_cancelled)
{

	if (!suspend_cancelled)
		xen_intr_resume();
}
/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc The interrupt source to configure.
 * \param trig Edge or level.
 * \param pol  Active high or low.
 *
 * \returns 0 on success, otherwise an errno.
 */
static int
xen_intr_pic_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn apis. */
	return (ENODEV);
}

static int
xen_intr_pic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	return (xen_intr_assign_cpu((struct xenisrc *)isrc,
	    apic_cpuid(apic_id)));
}

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
static struct pic xen_intr_pic = {
	.pic_enable_source = xen_intr_pic_enable_source,
	.pic_disable_source = xen_intr_pic_disable_source,
	.pic_eoi_source = xen_intr_pic_eoi_source,
	.pic_enable_intr = xen_intr_pic_enable_intr,
	.pic_disable_intr = xen_intr_pic_disable_intr,
	.pic_vector = xen_intr_pic_vector,
	.pic_source_pending = xen_intr_pic_source_pending,
	.pic_suspend = xen_intr_pic_suspend,
	.pic_resume = xen_intr_pic_resume,
	.pic_config_intr = xen_intr_pic_config_intr,
	.pic_assign_cpu = xen_intr_pic_assign_cpu,
};

/******************************* ARCH wrappers *******************************/

void
xen_arch_intr_init(void)
{
	int error;

	mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);

	error = intr_register_pic(&xen_intr_pic);
	if (error != 0)
		panic("%s(): failed registering Xen/x86 PIC, error=%d\n",
		    __func__, error);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \return A pointer to a newly allocated Xen interrupt source
 *         object or NULL.
 */
struct xenisrc *
xen_arch_intr_alloc(void)
{
	static int warned;
	struct xenisrc *isrc;
	unsigned int vector;
	int error;

	mtx_lock(&xen_intr_x86_lock);
	isrc = (struct xenisrc *)SLIST_FIRST(&avail_list);
	if (isrc != NULL) {
		SLIST_REMOVE_HEAD(&avail_list, free);
		mtx_unlock(&xen_intr_x86_lock);

		KASSERT(isrc->xi_arch.intsrc.is_pic == &xen_intr_pic,
		    ("interrupt not owned by Xen code?"));

		KASSERT(isrc->xi_arch.intsrc.is_handlers == 0,
		    ("Free evtchn still has handlers"));

		return (isrc);
	}

	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("%s: Xen interrupts exhausted.\n", __func__);
		}
		mtx_unlock(&xen_intr_x86_lock);
		return (NULL);
	}

	vector = first_evtchn_irq + xen_intr_auto_vector_count;
	xen_intr_auto_vector_count++;

	KASSERT((intr_lookup_source(vector) == NULL),
	    ("Trying to use an already allocated vector"));

	mtx_unlock(&xen_intr_x86_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_arch.intsrc.is_pic = &xen_intr_pic;
	isrc->xi_arch.vector = vector;
	error = intr_register_source(&isrc->xi_arch.intsrc);
	if (error != 0)
		panic("%s(): failed registering interrupt %u, error=%d\n",
		    __func__, vector, error);

	return (isrc);
}

/*
 * Place a released interrupt source on the free list for later reuse by
 * xen_arch_intr_alloc().
 */
void
xen_arch_intr_release(struct xenisrc *isrc)
{

	KASSERT(isrc->xi_arch.intsrc.is_handlers == 0,
	    ("Release called, but xenisrc still in use"));

	_Static_assert(sizeof(struct xenisrc) >= sizeof(struct avail_list),
	    "unused structure MUST be no larger than in-use structure");
	_Static_assert(offsetof(struct xenisrc, xi_arch) ==
	    offsetof(struct avail_list, preserve),
	    "unused structure does not properly overlay in-use structure");

	mtx_lock(&xen_intr_x86_lock);
	SLIST_INSERT_HEAD(&avail_list, (struct avail_list *)isrc, free);
	mtx_unlock(&xen_intr_x86_lock);
}