1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2006 Yahoo!, Inc.
5 * All rights reserved.
6 * Written by: John Baldwin <jhb@FreeBSD.org>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 /*
34 * Support for PCI Message Signalled Interrupts (MSI). MSI interrupts on
35 * x86 are basically APIC messages that the northbridge delivers directly
36 * to the local APICs as if they had come from an I/O APIC.
37 */
38
39 #include <sys/cdefs.h>
40 #include "opt_acpi.h"
41 #include "opt_iommu.h"
42
43 #include <sys/param.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/limits.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/sx.h>
51 #include <sys/sysctl.h>
52 #include <sys/systm.h>
53 #include <x86/apicreg.h>
54 #include <machine/cputypes.h>
55 #include <machine/md_var.h>
56 #include <machine/frame.h>
57 #include <machine/intr_machdep.h>
58 #include <x86/apicvar.h>
59 #include <x86/iommu/iommu_intrmap.h>
60 #include <machine/specialreg.h>
61 #include <dev/pci/pcivar.h>
62
63 /* Fields in address for Intel MSI messages. */
64 #define MSI_INTEL_ADDR_DEST 0x000ff000
65 #define MSI_INTEL_ADDR_RH 0x00000008
66 # define MSI_INTEL_ADDR_RH_ON 0x00000008
67 # define MSI_INTEL_ADDR_RH_OFF 0x00000000
68 #define MSI_INTEL_ADDR_DM 0x00000004
69 # define MSI_INTEL_ADDR_DM_PHYSICAL 0x00000000
70 # define MSI_INTEL_ADDR_DM_LOGICAL 0x00000004
71
72 /* Fields in data for Intel MSI messages. */
73 #define MSI_INTEL_DATA_TRGRMOD IOART_TRGRMOD /* Trigger mode. */
74 # define MSI_INTEL_DATA_TRGREDG IOART_TRGREDG
75 # define MSI_INTEL_DATA_TRGRLVL IOART_TRGRLVL
76 #define MSI_INTEL_DATA_LEVEL 0x00004000 /* Polarity. */
77 # define MSI_INTEL_DATA_DEASSERT 0x00000000
78 # define MSI_INTEL_DATA_ASSERT 0x00004000
79 #define MSI_INTEL_DATA_DELMOD IOART_DELMOD /* Delivery mode. */
80 # define MSI_INTEL_DATA_DELFIXED IOART_DELFIXED
81 # define MSI_INTEL_DATA_DELLOPRI IOART_DELLOPRI
82 # define MSI_INTEL_DATA_DELSMI IOART_DELSMI
83 # define MSI_INTEL_DATA_DELNMI IOART_DELNMI
84 # define MSI_INTEL_DATA_DELINIT IOART_DELINIT
85 # define MSI_INTEL_DATA_DELEXINT IOART_DELEXINT
86 #define MSI_INTEL_DATA_INTVEC IOART_INTVEC /* Interrupt vector. */
87
88 /*
89 * Build Intel MSI message and data values from a source. AMD64 systems
90 * seem to be compatible, so we use the same function for both.
91 */
92 #define INTEL_ADDR(msi) \
93 (MSI_INTEL_ADDR_BASE | (msi)->msi_cpu << 12 | \
94 MSI_INTEL_ADDR_RH_OFF | MSI_INTEL_ADDR_DM_PHYSICAL)
95 #define INTEL_DATA(msi) \
96 (MSI_INTEL_DATA_TRGREDG | MSI_INTEL_DATA_DELFIXED | (msi)->msi_vector)
97
98 static MALLOC_DEFINE(M_MSI, "msi", "PCI MSI");
99
100 /*
101 * MSI sources are bunched into groups. This is because MSI forces
102 * all of the messages to share the address and data registers and
103 * thus certain properties (such as the local APIC ID target on x86).
104 * Each group has a 'first' source that contains information global to
105 * the group. These fields are marked with (g) below.
106 *
107 * Note that local APIC ID is kind of special. Each message will be
108 * assigned an ID by the system; however, a group will use the ID from
109 * the first message.
110 *
111 * For MSI-X, each message is isolated.
112 */
struct msi_intsrc {
	struct intsrc msi_intsrc;	/* Generic intsrc; must be first. */
	device_t msi_dev;		/* Owning device. (g) NULL = free. */
	struct msi_intsrc *msi_first;	/* First source in group; NULL if
					   the source has been released. */
	u_int *msi_irqs;		/* Group's IRQ list. (g) */
	u_int msi_irq;			/* IRQ cookie. */
	u_int msi_cpu;			/* Local APIC ID. (g) */
	u_int msi_remap_cookie;		/* IOMMU cookie. */
	u_int msi_vector:8;		/* IDT vector. */
	u_int msi_count:8;		/* Messages in this group. (g) */
	u_int msi_maxcount:8;		/* Alignment for this group. (g) */
	u_int msi_enabled:8;		/* Enabled messages in this group. (g) */
	bool msi_msix;			/* MSI-X message. */
};
127
static void msi_create_source(void);
static void msi_enable_source(struct intsrc *isrc);
static void msi_disable_source(struct intsrc *isrc, int eoi);
static void msi_eoi_source(struct intsrc *isrc);
static void msi_enable_intr(struct intsrc *isrc);
static void msi_disable_intr(struct intsrc *isrc);
static int msi_vector(struct intsrc *isrc);
static int msi_source_pending(struct intsrc *isrc);
static int msi_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol);
static int msi_assign_cpu(struct intsrc *isrc, u_int apic_id);

/*
 * PIC interface for MSI/MSI-X interrupt sources.  Suspend, resume,
 * and pin reprogramming do not apply to message-signalled interrupts,
 * so those hooks are left NULL.
 */
struct pic msi_pic = {
	.pic_enable_source = msi_enable_source,
	.pic_disable_source = msi_disable_source,
	.pic_eoi_source = msi_eoi_source,
	.pic_enable_intr = msi_enable_intr,
	.pic_disable_intr = msi_disable_intr,
	.pic_vector = msi_vector,
	.pic_source_pending = msi_source_pending,
	.pic_suspend = NULL,
	.pic_resume = NULL,
	.pic_config_intr = msi_config_intr,
	.pic_assign_cpu = msi_assign_cpu,
	.pic_reprogram_pin = NULL,
};
154
/* First IRQ number in the range reserved for MSI; set in msi_init(). */
u_int first_msi_irq;
SYSCTL_UINT(_machdep, OID_AUTO, first_msi_irq, CTLFLAG_RD, &first_msi_irq, 0,
    "Number of first IRQ reserved for MSI and MSI-X interrupts");

/* Size of the reserved MSI IRQ range; boot-time tunable. */
u_int num_msi_irqs = 2048;
SYSCTL_UINT(_machdep, OID_AUTO, num_msi_irqs, CTLFLAG_RDTUN, &num_msi_irqs, 0,
    "Number of IRQs reserved for MSI and MSI-X interrupts");

#ifdef SMP
/**
 * Xen hypervisors prior to 4.6.0 do not properly handle updates to
 * enabled MSI-X table entries.  Allow migration of MSI-X interrupts
 * to be disabled via a tunable. Values have the following meaning:
 *
 * -1: automatic detection by FreeBSD
 * 0: enable migration
 * 1: disable migration
 */
int msix_disable_migration = -1;
SYSCTL_INT(_machdep, OID_AUTO, disable_msix_migration, CTLFLAG_RDTUN,
    &msix_disable_migration, 0,
    "Disable migration of MSI-X interrupts between CPUs");
#endif

static int msi_enabled;		/* Nonzero once msi_init() has succeeded. */
static u_int msi_last_irq;	/* Number of MSI sources created so far. */
static struct mtx msi_lock;	/* Protects source allocation/release state. */
182
static void
msi_enable_source(struct intsrc *isrc)
{
	/* Intentionally empty: no per-source enable action for MSI. */
}
187
static void
msi_disable_source(struct intsrc *isrc, int eoi)
{

	/* No masking is performed here; just issue the EOI if requested. */
	if (eoi == PIC_EOI)
		lapic_eoi();
}
195
static void
msi_eoi_source(struct intsrc *isrc)
{

	/* Acknowledge the interrupt at the local APIC. */
	lapic_eoi();
}
202
203 static void
msi_enable_intr(struct intsrc * isrc)204 msi_enable_intr(struct intsrc *isrc)
205 {
206 struct msi_intsrc *msi = (struct msi_intsrc *)isrc;
207
208 msi = msi->msi_first;
209 if (msi->msi_enabled == 0) {
210 for (u_int i = 0; i < msi->msi_count; i++)
211 apic_enable_vector(msi->msi_cpu, msi->msi_vector + i);
212 }
213 msi->msi_enabled++;
214 }
215
216 static void
msi_disable_intr(struct intsrc * isrc)217 msi_disable_intr(struct intsrc *isrc)
218 {
219 struct msi_intsrc *msi = (struct msi_intsrc *)isrc;
220
221 msi = msi->msi_first;
222
223 /*
224 * Interrupt sources are always registered, but never unregistered.
225 * Handle the case where MSIs have all been unregistered.
226 */
227 if (msi == NULL)
228 return;
229
230 msi->msi_enabled--;
231 if (msi->msi_enabled == 0) {
232 for (u_int i = 0; i < msi->msi_count; i++)
233 apic_disable_vector(msi->msi_cpu, msi->msi_vector + i);
234 }
235 }
236
237 static int
msi_vector(struct intsrc * isrc)238 msi_vector(struct intsrc *isrc)
239 {
240 struct msi_intsrc *msi = (struct msi_intsrc *)isrc;
241
242 return (msi->msi_irq);
243 }
244
static int
msi_source_pending(struct intsrc *isrc)
{

	/* MSI messages have no readable pending state; report none. */
	return (0);
}
251
static int
msi_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{

	/* Trigger mode and polarity of MSI messages cannot be configured. */
	return (ENODEV);
}
259
/*
 * Move an MSI group (or a single MSI-X message) to the CPU with local
 * APIC ID 'apic_id'.  New IDT vectors are allocated on the target CPU
 * first, the device is reprogrammed via BUS_REMAP_INTR, and only then
 * are the old vectors torn down, so no interrupt window is left
 * uncovered.  Returns 0 on success or an errno value.
 */
static int
msi_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
	struct msi_intsrc *sib, *msi = (struct msi_intsrc *)isrc;
	int old_vector;
	u_int old_id;
	int error, i, vector;

	/*
	 * Only allow CPUs to be assigned to the first message for an
	 * MSI group.
	 */
	if (msi->msi_first != msi)
		return (EINVAL);

#ifdef SMP
	/* Honor the Xen workaround tunable for MSI-X messages. */
	if (msix_disable_migration && msi->msi_msix)
		return (EINVAL);
#endif

	/* Store information to free existing irq. */
	old_vector = msi->msi_vector;
	old_id = msi->msi_cpu;
	if (old_id == apic_id)
		return (0);

	/* Allocate IDT vectors on this cpu. */
	if (msi->msi_count > 1) {
		/* Groups with multiple messages need contiguous vectors. */
		KASSERT(!msi->msi_msix, ("MSI-X message group"));
		vector = apic_alloc_vectors(apic_id, msi->msi_irqs,
		    msi->msi_count, msi->msi_maxcount);
	} else
		vector = apic_alloc_vector(apic_id, msi->msi_irq);
	if (vector == 0)
		return (ENOSPC);

	/* Must be set before BUS_REMAP_INTR as it may call back into MSI. */
	msi->msi_cpu = apic_id;
	msi->msi_vector = vector;
	/* If the group is in use, enable the new vectors right away. */
	if (msi->msi_enabled > 0) {
		for (i = 0; i < msi->msi_count; i++)
			apic_enable_vector(apic_id, vector + i);
	}
	/* Ask the bus to reprogram the device's MSI registers. */
	error = BUS_REMAP_INTR(device_get_parent(msi->msi_dev), msi->msi_dev,
	    msi->msi_irq);
	if (error == 0) {
		if (bootverbose) {
			printf("msi: Assigning %s IRQ %d to local APIC %u vector %u\n",
			    msi->msi_msix ? "MSI-X" : "MSI", msi->msi_irq,
			    msi->msi_cpu, msi->msi_vector);
		}
		/* Propagate the new CPU/vector to the group's siblings. */
		for (i = 1; i < msi->msi_count; i++) {
			sib = (struct msi_intsrc *)intr_lookup_source(
			    msi->msi_irqs[i]);
			sib->msi_cpu = apic_id;
			sib->msi_vector = vector + i;
			if (bootverbose)
				printf("msi: Assigning MSI IRQ %d to local APIC %u vector %u\n",
				    sib->msi_irq, sib->msi_cpu,
				    sib->msi_vector);
		}
	} else {
		device_printf(msi->msi_dev,
		    "remap irq %u to APIC ID %u failed (error %d)\n",
		    msi->msi_irq, apic_id, error);
		/*
		 * Roll back to the previous CPU/vector and swap the
		 * old/new bookkeeping so the cleanup below frees the
		 * freshly allocated (now unused) vectors instead.
		 */
		msi->msi_cpu = old_id;
		msi->msi_vector = old_vector;
		old_id = apic_id;
		old_vector = vector;
	}

	/*
	 * Free the old vector after the new one is established. This is done
	 * to prevent races where we could miss an interrupt. If BUS_REMAP_INTR
	 * failed then we disable and free the new, unused vector(s).
	 */
	if (msi->msi_enabled > 0) {
		for (i = 0; i < msi->msi_count; i++)
			apic_disable_vector(old_id, old_vector + i);
	}
	apic_free_vector(old_id, old_vector, msi->msi_irq);
	for (i = 1; i < msi->msi_count; i++)
		apic_free_vector(old_id, old_vector + i, msi->msi_irqs[i]);
	return (error);
}
345
/*
 * One-time MSI subsystem setup: verify the CPU supports MSI, reserve
 * a block of num_msi_irqs IRQ numbers above the existing I/O IRQs,
 * and register the MSI PIC.  If the CPU is unsupported or the range
 * is disabled, msi_enabled stays 0 and allocation requests fail.
 */
void
msi_init(void)
{

	/* Check if we have a supported CPU. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		break;
	case CPU_VENDOR_CENTAUR:
		/* Centaur: only family 6, model 0xf and later. */
		if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf)
			break;
		/* FALLTHROUGH */
	default:
		return;
	}

#ifdef SMP
	if (msix_disable_migration == -1) {
		/* The default is to allow migration of MSI-X interrupts. */
		msix_disable_migration = 0;
	}
#endif

	/* Setting the num_msi_irqs tunable to 0 disables MSI entirely. */
	if (num_msi_irqs == 0)
		return;

	/* Reserve IRQ numbers [num_io_irqs, num_io_irqs + num_msi_irqs). */
	first_msi_irq = num_io_irqs;
	if (num_msi_irqs > UINT_MAX - first_msi_irq)
		panic("num_msi_irqs too high");
	num_io_irqs = first_msi_irq + num_msi_irqs;

	msi_enabled = 1;
	intr_register_pic(&msi_pic);
	mtx_init(&msi_lock, "msi", NULL, MTX_DEF);
}
384
385 static void
msi_create_source(void)386 msi_create_source(void)
387 {
388 struct msi_intsrc *msi;
389 u_int irq;
390
391 mtx_lock(&msi_lock);
392 if (msi_last_irq >= num_msi_irqs) {
393 mtx_unlock(&msi_lock);
394 return;
395 }
396 irq = msi_last_irq + first_msi_irq;
397 msi_last_irq++;
398 mtx_unlock(&msi_lock);
399
400 msi = malloc(sizeof(struct msi_intsrc), M_MSI, M_WAITOK | M_ZERO);
401 msi->msi_intsrc.is_pic = &msi_pic;
402 msi->msi_irq = irq;
403 intr_register_source(&msi->msi_intsrc);
404 nexus_add_irq(irq);
405 }
406
407 /*
408 * Try to allocate 'count' interrupt sources with contiguous IDT values.
409 */
410 int
msi_alloc(device_t dev,int count,int maxcount,int * irqs)411 msi_alloc(device_t dev, int count, int maxcount, int *irqs)
412 {
413 struct msi_intsrc *msi, *fsrc;
414 u_int cpu, domain, *mirqs;
415 int cnt, i, vector;
416 #ifdef IOMMU
417 u_int cookies[count];
418 int error;
419 #endif
420
421 if (!msi_enabled)
422 return (ENXIO);
423
424 if (bus_get_domain(dev, &domain) != 0)
425 domain = 0;
426
427 if (count > 1)
428 mirqs = malloc(count * sizeof(*mirqs), M_MSI, M_WAITOK);
429 else
430 mirqs = NULL;
431 again:
432 mtx_lock(&msi_lock);
433
434 /* Try to find 'count' free IRQs. */
435 cnt = 0;
436 for (i = first_msi_irq; i < first_msi_irq + num_msi_irqs; i++) {
437 msi = (struct msi_intsrc *)intr_lookup_source(i);
438
439 /* End of allocated sources, so break. */
440 if (msi == NULL)
441 break;
442
443 /* If this is a free one, save its IRQ in the array. */
444 if (msi->msi_dev == NULL) {
445 irqs[cnt] = i;
446 cnt++;
447 if (cnt == count)
448 break;
449 }
450 }
451
452 /* Do we need to create some new sources? */
453 if (cnt < count) {
454 /* If we would exceed the max, give up. */
455 if (i + (count - cnt) > first_msi_irq + num_msi_irqs) {
456 mtx_unlock(&msi_lock);
457 free(mirqs, M_MSI);
458 return (ENXIO);
459 }
460 mtx_unlock(&msi_lock);
461
462 /* We need count - cnt more sources. */
463 while (cnt < count) {
464 msi_create_source();
465 cnt++;
466 }
467 goto again;
468 }
469
470 /* Ok, we now have the IRQs allocated. */
471 KASSERT(cnt == count, ("count mismatch"));
472
473 /* Allocate 'count' IDT vectors. */
474 cpu = intr_next_cpu(domain);
475 vector = apic_alloc_vectors(cpu, irqs, count, maxcount);
476 if (vector == 0) {
477 mtx_unlock(&msi_lock);
478 free(mirqs, M_MSI);
479 return (ENOSPC);
480 }
481
482 #ifdef IOMMU
483 mtx_unlock(&msi_lock);
484 error = iommu_alloc_msi_intr(dev, cookies, count);
485 mtx_lock(&msi_lock);
486 if (error == EOPNOTSUPP)
487 error = 0;
488 if (error != 0) {
489 for (i = 0; i < count; i++)
490 apic_free_vector(cpu, vector + i, irqs[i]);
491 mtx_unlock(&msi_lock);
492 free(mirqs, M_MSI);
493 return (error);
494 }
495 for (i = 0; i < count; i++) {
496 msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
497 msi->msi_remap_cookie = cookies[i];
498 }
499 #endif
500
501 /* Assign IDT vectors and make these messages owned by 'dev'. */
502 fsrc = (struct msi_intsrc *)intr_lookup_source(irqs[0]);
503 for (i = 0; i < count; i++) {
504 msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
505 msi->msi_cpu = cpu;
506 msi->msi_dev = dev;
507 msi->msi_vector = vector + i;
508 if (bootverbose)
509 printf(
510 "msi: routing MSI IRQ %d to local APIC %u vector %u\n",
511 msi->msi_irq, msi->msi_cpu, msi->msi_vector);
512 msi->msi_first = fsrc;
513 KASSERT(msi->msi_intsrc.is_handlers == 0,
514 ("dead MSI has handlers"));
515 }
516 fsrc->msi_count = count;
517 fsrc->msi_maxcount = maxcount;
518 if (count > 1)
519 bcopy(irqs, mirqs, count * sizeof(*mirqs));
520 fsrc->msi_irqs = mirqs;
521 mtx_unlock(&msi_lock);
522 return (0);
523 }
524
/*
 * Release a group of 'count' MSI messages allocated by msi_alloc().
 * 'irqs' must name the entire group, starting with the group's first
 * message.  Returns 0 or an errno value.
 */
int
msi_release(int *irqs, int count)
{
	struct msi_intsrc *msi, *first;
	int i;

	mtx_lock(&msi_lock);
	first = (struct msi_intsrc *)intr_lookup_source(irqs[0]);
	if (first == NULL) {
		mtx_unlock(&msi_lock);
		return (ENOENT);
	}

	/* Make sure this isn't an MSI-X message. */
	if (first->msi_msix) {
		mtx_unlock(&msi_lock);
		return (EINVAL);
	}

	/* Make sure this message is allocated to a group. */
	if (first->msi_first == NULL) {
		mtx_unlock(&msi_lock);
		return (ENXIO);
	}

	/*
	 * Make sure this is the start of a group and that we are releasing
	 * the entire group.
	 */
	if (first->msi_first != first || first->msi_count != count) {
		mtx_unlock(&msi_lock);
		return (EINVAL);
	}
	KASSERT(first->msi_dev != NULL, ("unowned group"));

	/* Clear all the extra messages in the group. */
	for (i = 1; i < count; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
		KASSERT(msi->msi_first == first, ("message not in group"));
		KASSERT(msi->msi_dev == first->msi_dev, ("owner mismatch"));
#ifdef IOMMU
		/* The lock is dropped across the IOMMU unmap call. */
		mtx_unlock(&msi_lock);
		iommu_unmap_msi_intr(first->msi_dev, msi->msi_remap_cookie);
		mtx_lock(&msi_lock);
#endif
		/* Mark the source free and return its IDT vector. */
		msi->msi_first = NULL;
		msi->msi_dev = NULL;
		apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq);
		msi->msi_vector = 0;
	}

	/* Clear out the first message. */
#ifdef IOMMU
	mtx_unlock(&msi_lock);
	iommu_unmap_msi_intr(first->msi_dev, first->msi_remap_cookie);
	mtx_lock(&msi_lock);
#endif
	first->msi_first = NULL;
	first->msi_dev = NULL;
	apic_free_vector(first->msi_cpu, first->msi_vector, first->msi_irq);
	first->msi_vector = 0;
	first->msi_count = 0;
	first->msi_maxcount = 0;
	/* Free the group's IRQ list (NULL for single-message groups). */
	free(first->msi_irqs, M_MSI);
	first->msi_irqs = NULL;

	mtx_unlock(&msi_lock);
	return (0);
}
594
/*
 * Compute the address/data register pair a device should program for
 * the given MSI or MSI-X IRQ.  With an IOMMU the values come from the
 * interrupt remapping layer; otherwise the Intel-format values are
 * built directly from the source's CPU and vector.  Returns 0 or an
 * errno value.
 */
int
msi_map(int irq, uint64_t *addr, uint32_t *data)
{
	struct msi_intsrc *msi;
	int error;
#ifdef IOMMU
	struct msi_intsrc *msi1;
	int i, k;
#endif

	mtx_lock(&msi_lock);
	msi = (struct msi_intsrc *)intr_lookup_source(irq);
	if (msi == NULL) {
		mtx_unlock(&msi_lock);
		return (ENOENT);
	}

	/* Make sure this message is allocated to a device. */
	if (msi->msi_dev == NULL) {
		mtx_unlock(&msi_lock);
		return (ENXIO);
	}

	/*
	 * If this message isn't an MSI-X message, make sure it's part
	 * of a group, and switch to the first message in the
	 * group.
	 */
	if (!msi->msi_msix) {
		if (msi->msi_first == NULL) {
			mtx_unlock(&msi_lock);
			return (ENXIO);
		}
		msi = msi->msi_first;
	}

#ifdef IOMMU
	/*
	 * Program the remap entries for the group's other count - 1
	 * messages as well; 'k' counts the siblings still to be found.
	 * The lock is dropped across each IOMMU call.
	 */
	if (!msi->msi_msix) {
		for (k = msi->msi_count - 1, i = first_msi_irq; k > 0 &&
		    i < first_msi_irq + num_msi_irqs; i++) {
			if (i == msi->msi_irq)
				continue;
			msi1 = (struct msi_intsrc *)intr_lookup_source(i);
			if (!msi1->msi_msix && msi1->msi_first == msi) {
				mtx_unlock(&msi_lock);
				iommu_map_msi_intr(msi1->msi_dev,
				    msi1->msi_cpu, msi1->msi_vector,
				    msi1->msi_remap_cookie, NULL, NULL);
				k--;
				mtx_lock(&msi_lock);
			}
		}
	}
	mtx_unlock(&msi_lock);
	error = iommu_map_msi_intr(msi->msi_dev, msi->msi_cpu,
	    msi->msi_vector, msi->msi_remap_cookie, addr, data);
#else
	mtx_unlock(&msi_lock);
	error = EOPNOTSUPP;
#endif
	/*
	 * Without remapping, the plain Intel MSI address format can
	 * only target APIC IDs that fit in 8 bits.
	 */
	if (error == EOPNOTSUPP && msi->msi_cpu > 0xff) {
		printf("%s: unsupported destination APIC ID %u\n", __func__,
		    msi->msi_cpu);
		error = EINVAL;
	}
	if (error == EOPNOTSUPP) {
		*addr = INTEL_ADDR(msi);
		*data = INTEL_DATA(msi);
		error = 0;
	}
	return (error);
}
667
/*
 * Allocate a single MSI-X message for 'dev', returning its IRQ number
 * in '*irq'.  Each MSI-X message forms its own one-element group.
 * Returns 0 or an errno value.
 */
int
msix_alloc(device_t dev, int *irq)
{
	struct msi_intsrc *msi;
	u_int cpu, domain;
	int i, vector;
#ifdef IOMMU
	u_int cookie;
	int error;
#endif

	if (!msi_enabled)
		return (ENXIO);

	/* Fall back to domain 0 if the device's domain is unknown. */
	if (bus_get_domain(dev, &domain) != 0)
		domain = 0;

again:
	mtx_lock(&msi_lock);

	/* Find a free IRQ. */
	for (i = first_msi_irq; i < first_msi_irq + num_msi_irqs; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(i);

		/* End of allocated sources, so break. */
		if (msi == NULL)
			break;

		/* Stop at the first free source. */
		if (msi->msi_dev == NULL)
			break;
	}

	/* Are all IRQs in use? */
	if (i == first_msi_irq + num_msi_irqs) {
		mtx_unlock(&msi_lock);
		return (ENXIO);
	}

	/* Do we need to create a new source? */
	if (msi == NULL) {
		mtx_unlock(&msi_lock);

		/* Create a new source. */
		msi_create_source();
		/* Rescan with the lock held to claim the new source. */
		goto again;
	}

	/* Allocate an IDT vector. */
	cpu = intr_next_cpu(domain);
	vector = apic_alloc_vector(cpu, i);
	if (vector == 0) {
		mtx_unlock(&msi_lock);
		return (ENOSPC);
	}

	/* Claim the source before dropping the lock for the IOMMU call. */
	msi->msi_dev = dev;
#ifdef IOMMU
	mtx_unlock(&msi_lock);
	error = iommu_alloc_msi_intr(dev, &cookie, 1);
	mtx_lock(&msi_lock);
	/* EOPNOTSUPP means no interrupt remapping; proceed without it. */
	if (error == EOPNOTSUPP)
		error = 0;
	if (error != 0) {
		msi->msi_dev = NULL;
		apic_free_vector(cpu, vector, i);
		mtx_unlock(&msi_lock);
		return (error);
	}
	msi->msi_remap_cookie = cookie;
#endif

	if (bootverbose)
		printf("msi: routing MSI-X IRQ %d to local APIC %u vector %u\n",
		    msi->msi_irq, cpu, vector);

	/* Setup source. */
	msi->msi_cpu = cpu;
	msi->msi_first = msi;
	msi->msi_vector = vector;
	msi->msi_msix = true;
	msi->msi_count = 1;
	msi->msi_maxcount = 1;
	msi->msi_irqs = NULL;

	KASSERT(msi->msi_intsrc.is_handlers == 0, ("dead MSI-X has handlers"));
	mtx_unlock(&msi_lock);

	*irq = i;
	return (0);
}
759
/*
 * Release a single MSI-X message allocated by msix_alloc().  Returns
 * 0 or an errno value.
 */
int
msix_release(int irq)
{
	struct msi_intsrc *msi;

	mtx_lock(&msi_lock);
	msi = (struct msi_intsrc *)intr_lookup_source(irq);
	if (msi == NULL) {
		mtx_unlock(&msi_lock);
		return (ENOENT);
	}

	/* Make sure this is an MSI-X message. */
	if (!msi->msi_msix) {
		mtx_unlock(&msi_lock);
		return (EINVAL);
	}

	KASSERT(msi->msi_dev != NULL, ("unowned message"));

	/* Clear out the message. */
#ifdef IOMMU
	/* The lock is dropped across the IOMMU unmap call. */
	mtx_unlock(&msi_lock);
	iommu_unmap_msi_intr(msi->msi_dev, msi->msi_remap_cookie);
	mtx_lock(&msi_lock);
#endif
	/* Mark the source free and return its IDT vector. */
	msi->msi_first = NULL;
	msi->msi_dev = NULL;
	apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq);
	msi->msi_vector = 0;
	msi->msi_msix = false;
	msi->msi_count = 0;
	msi->msi_maxcount = 0;

	mtx_unlock(&msi_lock);
	return (0);
}
797