xref: /titanic_41/usr/src/uts/i86xpv/io/psm/xpv_uppc.c (revision 5203bc321053fb87d7073c7640548fab73634793)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #define	PSMI_1_7
28 
29 #include <sys/mutex.h>
30 #include <sys/types.h>
31 #include <sys/time.h>
32 #include <sys/clock.h>
33 #include <sys/machlock.h>
34 #include <sys/smp_impldefs.h>
35 #include <sys/uadmin.h>
36 #include <sys/promif.h>
37 #include <sys/psm.h>
38 #include <sys/psm_common.h>
39 #include <sys/atomic.h>
40 #include <sys/archsystm.h>
41 #include <sys/mach_intr.h>
42 #include <sys/hypervisor.h>
43 #include <sys/evtchn_impl.h>
44 #include <sys/modctl.h>
45 #include <sys/trap.h>
46 #include <sys/panic.h>
47 
48 #include <xen/public/vcpu.h>
49 #include <xen/public/physdev.h>
50 
51 
52 /*
53  * Global Data
54  */
55 int xen_uppc_use_acpi = 1;	/* Use ACPI by default */
56 int xen_uppc_enable_acpi = 0;
57 
58 static int xen_clock_irq = -1;
59 
60 /*
61  * For interrupt link devices, if xen_uppc_unconditional_srs is set, an irq
62  * resource will be assigned (via _SRS). If it is not set, use the current
63  * irq setting (via _CRS), but only if that irq is in the set of possible
64  * irqs (returned by _PRS) for the device.
65  */
66 int xen_uppc_unconditional_srs = 1;
67 
68 /*
69  * For interrupt link devices, if xen_uppc_prefer_crs is set when we are
70  * assigning an IRQ resource to a device, prefer the current IRQ setting
71  * over other possible irq settings under same conditions.
72  */
73 int xen_uppc_prefer_crs = 1;
74 
75 int xen_uppc_verbose = 0;
76 
77 /* flag definitions for xen_uppc_verbose */
78 #define	XEN_UPPC_VERBOSE_IRQ_FLAG		0x00000001
79 #define	XEN_UPPC_VERBOSE_POWEROFF_FLAG		0x00000002
80 #define	XEN_UPPC_VERBOSE_POWEROFF_PAUSE_FLAG	0x00000004
81 
82 #define	XEN_UPPC_VERBOSE_IRQ(fmt) \
83 	if (xen_uppc_verbose & XEN_UPPC_VERBOSE_IRQ_FLAG) \
84 		cmn_err fmt;
85 
86 #define	XEN_UPPC_VERBOSE_POWEROFF(fmt) \
87 	if (xen_uppc_verbose & XEN_UPPC_VERBOSE_POWEROFF_FLAG) \
88 		prom_printf fmt;
89 
90 uchar_t xen_uppc_reserved_irqlist[MAX_ISA_IRQ + 1];
91 
92 static uint16_t xen_uppc_irq_shared_table[MAX_ISA_IRQ + 1];
93 
94 /*
95  * Contains SCI irqno from FADT after initialization
96  */
97 static int xen_uppc_sci = -1;
98 
99 static struct psm_info xen_uppc_info;
100 
101 /*
102  * Local support routines
103  */
104 
/*
 * Bring up ACPI for use by this PSM: locate the SCI, initialize the
 * ACPI CA subsystem, program the SCI trigger mode into the ELCR (we
 * run in PIC mode), and initialize the common psm ACPI support code.
 *
 * Returns 1 if ACPI is usable, 0 otherwise (the caller then proceeds
 * without ACPI).
 */
static int
xen_uppc_init_acpi(void)
{
	int verboseflags = 0;
	int	sci;
	iflag_t sci_flags;

	/*
	 * Process SCI configuration here; this may return
	 * an error if acpi-user-options has specified
	 * legacy mode (use ACPI without ACPI mode or SCI)
	 */
	if (acpica_get_sci(&sci, &sci_flags) != AE_OK)
		sci = -1;

	/*
	 * Initialize sub-system - if an error is returned, ACPI is not
	 * used.
	 */
	if (acpica_init() != AE_OK)
		return (0);

	/*
	 * uppc implies system is in PIC mode; set edge/level
	 * via ELCR based on return value from get_sci; this
	 * will default to level/low if no override present,
	 * as recommended by Intel ACPI CA team.
	 */
	if (sci >= 0) {
		ASSERT((sci_flags.intr_el == INTR_EL_LEVEL) ||
		    (sci_flags.intr_el == INTR_EL_EDGE));

		psm_set_elcr(sci, sci_flags.intr_el == INTR_EL_LEVEL);
	}

	/*
	 * Remember SCI for later use (its event channel is enabled in
	 * xen_uppc_picinit())
	 */
	xen_uppc_sci = sci;

	/* Translate our verbosity flags into the generic psm ones */
	if (xen_uppc_verbose & XEN_UPPC_VERBOSE_IRQ_FLAG)
		verboseflags |= PSM_VERBOSE_IRQ_FLAG;

	if (xen_uppc_verbose & XEN_UPPC_VERBOSE_POWEROFF_FLAG)
		verboseflags |= PSM_VERBOSE_POWEROFF_FLAG;

	if (xen_uppc_verbose & XEN_UPPC_VERBOSE_POWEROFF_PAUSE_FLAG)
		verboseflags |= PSM_VERBOSE_POWEROFF_PAUSE_FLAG;

	if (acpi_psm_init(xen_uppc_info.p_mach_idstring, verboseflags) ==
	    ACPI_PSM_FAILURE) {
		return (0);
	}

	return (1);
}
161 
162 /*
163  * Autoconfiguration Routines
164  */
165 
166 static int
167 xen_uppc_probe(void)
168 {
169 
170 	return (PSM_SUCCESS);
171 }
172 
/*
 * psm_init entry point: one-time soft initialization.  In dom0 only,
 * bring up ACPI (if enabled) and record the ISA IRQs the BIOS/ACPI has
 * reserved; domU has no physical IRQs to manage.
 */
static void
xen_uppc_softinit(void)
{
	int i;

	/* LINTED logical expression always true: op "||" */
	ASSERT((1 << EVTCHN_SHIFT) == NBBY * sizeof (ulong_t));
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		if (xen_uppc_use_acpi && xen_uppc_init_acpi()) {
			build_reserved_irqlist((uchar_t *)
			    xen_uppc_reserved_irqlist);
			/* no IRQ has any sharers yet */
			for (i = 0; i <= MAX_ISA_IRQ; i++)
				xen_uppc_irq_shared_table[i] = 0;
			xen_uppc_enable_acpi = 1;
		}
	}
}
190 
191 
192 #define	XEN_NSEC_PER_TICK	10 /* XXX - assume we have a 100 Mhz clock */
193 
/*
 * psm_clkinit entry point: report the resolution (in nanoseconds) of
 * the clock programmed via HYPERVISOR_set_timer_op() (see
 * xen_uppc_timer_reprogram()).  The hertz argument is unused.  As a
 * side effect, a domU marks the TOD clock read-only here, since it
 * cannot set the TOD hardware.
 */
/*ARGSUSED*/
static int
xen_uppc_clkinit(int hertz)
{
	extern enum tod_fault_type tod_fault(enum tod_fault_type, int);
	extern int dosynctodr;

	/*
	 * domU cannot set the TOD hardware, fault the TOD clock now to
	 * indicate that and turn off attempts to sync TOD hardware
	 * with the hires timer.
	 */
	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
		mutex_enter(&tod_lock);
		(void) tod_fault(TOD_RDONLY, 0);
		dosynctodr = 0;
		mutex_exit(&tod_lock);
	}
	/*
	 * The hypervisor provides a timer based on the local APIC timer.
	 * The interface supports requests of nanosecond resolution.
	 * A common frequency of the apic clock is 100 Mhz which
	 * gives a resolution of 10 nsec per tick.  What we would really like
	 * is a way to get the ns per tick value from xen.
	 * XXPV - This is an assumption that needs checking and may change
	 */
	return (XEN_NSEC_PER_TICK);
}
222 
223 static void
224 xen_uppc_picinit()
225 {
226 	int irqno;
227 
228 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
229 #if 0
230 		/* hypervisor initializes the 8259, don't mess with it */
231 		picsetup();	 /* initialise the 8259 */
232 #endif
233 		/*
234 		 * We never called xen_uppc_addspl() when the SCI
235 		 * interrupt was added because that happened before the
236 		 * PSM module was loaded.  Fix that up here by doing
237 		 * any missed operations (e.g. bind to CPU)
238 		 */
239 		if ((irqno = xen_uppc_sci) >= 0) {
240 			ec_enable_irq(irqno);
241 		}
242 	}
243 }
244 
245 
/*
 * psm_addspl entry point: a handler has been added for irqno at
 * priority ipl.  Update the ISA sharing count and program the backing
 * event channel's priority, affinity (always CPU 0 on UP), and mask.
 * Returns PSM_SUCCESS on success.
 */
/*ARGSUSED*/
static int
xen_uppc_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	int ret = PSM_SUCCESS;
	cpuset_t cpus;

	/* count sharers of ISA IRQs; consulted when picking link-device irqs */
	if (irqno >= 0 && irqno <= MAX_ISA_IRQ)
		atomic_add_16(&xen_uppc_irq_shared_table[irqno], 1);

	/*
	 * We are called at splhi() so we can't call anything that might end
	 * up trying to context switch.
	 */
	if (irqno >= PIRQ_BASE && irqno < NR_PIRQS &&
	    DOMAIN_IS_INITDOMAIN(xen_info)) {
		/* physical IRQ in dom0: set up the pirq event channel */
		CPUSET_ZERO(cpus);
		CPUSET_ADD(cpus, 0);
		ec_setup_pirq(irqno, ipl, &cpus);
	} else {
		/*
		 * Set priority/affinity/enable for non PIRQs
		 */
		ret = ec_set_irq_priority(irqno, ipl);
		ASSERT(ret == 0);
		CPUSET_ZERO(cpus);
		CPUSET_ADD(cpus, 0);
		ec_set_irq_affinity(irqno, cpus);
		ec_enable_irq(irqno);
	}

	return (ret);
}
279 
/*
 * psm_delspl entry point: a handler for irqno has been removed.
 * Decrement the ISA sharing count, then either reprogram the event
 * channel's priority (irq still in use) or block and unbind it (last
 * sharer gone, signalled by max_ipl == PSM_INVALID_IPL).
 */
/*ARGSUSED*/
static int
xen_uppc_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	int err = PSM_SUCCESS;

	if (irqno >= 0 && irqno <= MAX_ISA_IRQ)
		atomic_add_16(&xen_uppc_irq_shared_table[irqno], -1);

	if (irqno >= PIRQ_BASE && irqno < NR_PIRQS &&
	    DOMAIN_IS_INITDOMAIN(xen_info)) {
		if (max_ipl == PSM_INVALID_IPL) {
			/*
			 * unbind if no more sharers of this irq/evtchn
			 */
			(void) ec_block_irq(irqno);
			ec_unbind_irq(irqno);
		} else {
			/*
			 * If still in use reset priority
			 */
			err = ec_set_irq_priority(irqno, max_ipl);
		}
	} else {
		/* non-PIRQ (or domU): always block and unbind */
		(void) ec_block_irq(irqno);
		ec_unbind_irq(irqno);
	}
	return (err);
}
309 
310 static processorid_t
311 xen_uppc_get_next_processorid(processorid_t id)
312 {
313 	if (id == -1)
314 		return (0);
315 	return (-1);
316 }
317 
318 /*ARGSUSED*/
319 static int
320 xen_uppc_get_clockirq(int ipl)
321 {
322 	if (xen_clock_irq != -1)
323 		return (xen_clock_irq);
324 
325 	xen_clock_irq = ec_bind_virq_to_irq(VIRQ_TIMER, 0);
326 	return (xen_clock_irq);
327 }
328 
/*
 * psm_shutdown entry point: carry out the PSM part of halt/reboot/
 * poweroff via hypervisor shutdown hypercalls.  For a dom0 poweroff
 * with ACPI enabled, try ACPI poweroff first and fall through to a
 * hypervisor poweroff if it returns.
 */
/*ARGSUSED*/
static void
xen_uppc_shutdown(int cmd, int fcn)
{
	XEN_UPPC_VERBOSE_POWEROFF(("xen_uppc_shutdown(%d,%d);\n", cmd, fcn));

	switch (cmd) {
	case A_SHUTDOWN:
		switch (fcn) {
		case AD_BOOT:
		case AD_IBOOT:
			(void) HYPERVISOR_shutdown(SHUTDOWN_reboot);
			break;
		case AD_POWEROFF:
			/* fall through if domU or if poweroff fails */
			if (DOMAIN_IS_INITDOMAIN(xen_info))
				if (xen_uppc_enable_acpi)
					(void) acpi_poweroff();
			/* FALLTHRU */
		case AD_HALT:
		default:
			(void) HYPERVISOR_shutdown(SHUTDOWN_poweroff);
			break;
		}
		break;
	case A_REBOOT:
		(void) HYPERVISOR_shutdown(SHUTDOWN_reboot);
		break;
	default:
		/* unknown command: nothing sensible to do */
		return;
	}
}
361 
362 
363 /*
364  * This function will reprogram the timer.
365  *
366  * When in oneshot mode the argument is the absolute time in future at which to
367  * generate the interrupt.
368  *
369  * When in periodic mode, the argument is the interval at which the
370  * interrupts should be generated. There is no need to support the periodic
371  * mode timer change at this time.
372  *
373  * Note that we must be careful to convert from hrtime to Xen system time (see
374  * xpv_timestamp.c).
375  */
static void
xen_uppc_timer_reprogram(hrtime_t timer_req)
{
	hrtime_t now, timer_new, time_delta, xen_time;
	ulong_t flags;

	flags = intr_clear();
	/*
	 * We should be called from high PIL context (CBE_HIGH_PIL),
	 * so kpreempt is disabled.
	 */

	/* snapshot both timebases so the delta converts between them */
	now = xpv_gethrtime();
	xen_time = xpv_getsystime();
	if (timer_req <= now) {
		/*
		 * requested to generate an interrupt in the past
		 * generate an interrupt as soon as possible
		 */
		time_delta = XEN_NSEC_PER_TICK;
	} else
		time_delta = timer_req - now;

	/* express the deadline in Xen system time for the hypercall */
	timer_new = xen_time + time_delta;
	if (HYPERVISOR_set_timer_op(timer_new) != 0)
		panic("can't set hypervisor timer?");
	intr_restore(flags);
}
404 
405 /*
406  * This function will enable timer interrupts.
407  */
static void
xen_uppc_timer_enable(void)
{
	/* re-allow delivery of the clock event channel */
	ec_unmask_irq(xen_clock_irq);
}
413 
414 /*
415  * This function will disable timer interrupts on the current cpu.
416  */
static void
xen_uppc_timer_disable(void)
{
	/* mask the clock event channel so no further ticks are delivered */
	(void) ec_block_irq(xen_clock_irq);
	/*
	 * If the clock irq is pending on this cpu then we need to
	 * clear the pending interrupt.
	 */
	ec_unpend_irq(xen_clock_irq);
}
427 
428 
429 /*
430  * Configures the irq for the interrupt link device identified by
431  * acpipsmlnkp.
432  *
433  * Gets the current and the list of possible irq settings for the
434  * device. If xen_uppc_unconditional_srs is not set, and the current
435  * resource setting is in the list of possible irq settings,
436  * current irq resource setting is passed to the caller.
437  *
438  * Otherwise, picks an irq number from the list of possible irq
439  * settings, and sets the irq of the device to this value.
440  * If prefer_crs is set, among a set of irq numbers in the list that have
441  * the least number of devices sharing the interrupt, we pick current irq
442  * resource setting if it is a member of this set.
443  *
444  * Passes the irq number in the value pointed to by pci_irqp, and
445  * polarity and sensitivity in the structure pointed to by dipintrflagp
446  * to the caller.
447  *
 * Note that if setting the irq resource failed, but successfully obtained
449  * the current irq resource settings, passes the current irq resources
450  * and considers it a success.
451  *
452  * Returns:
453  * ACPI_PSM_SUCCESS on success.
454  *
 * ACPI_PSM_FAILURE if an error occurred during the configuration or
456  * if a suitable irq was not found for this device, or if setting the
457  * irq resource and obtaining the current resource fails.
458  *
459  */
460 static int
461 xen_uppc_acpi_irq_configure(acpi_psm_lnk_t *acpipsmlnkp, dev_info_t *dip,
462     int *pci_irqp, iflag_t *dipintr_flagp)
463 {
464 	int i, min_share, foundnow, done = 0;
465 	int32_t irq;
466 	int32_t share_irq = -1;
467 	int32_t chosen_irq = -1;
468 	int cur_irq = -1;
469 	acpi_irqlist_t *irqlistp;
470 	acpi_irqlist_t *irqlistent;
471 
472 	if ((acpi_get_possible_irq_resources(acpipsmlnkp, &irqlistp))
473 	    == ACPI_PSM_FAILURE) {
474 		XEN_UPPC_VERBOSE_IRQ((CE_WARN, "!xVM_uppc: Unable to determine "
475 		    "or assign IRQ for device %s, instance #%d: The system was "
476 		    "unable to get the list of potential IRQs from ACPI.",
477 		    ddi_get_name(dip), ddi_get_instance(dip)));
478 
479 		return (ACPI_PSM_FAILURE);
480 	}
481 
482 	if ((acpi_get_current_irq_resource(acpipsmlnkp, &cur_irq,
483 	    dipintr_flagp) == ACPI_PSM_SUCCESS) &&
484 	    (!xen_uppc_unconditional_srs) &&
485 	    (cur_irq > 0)) {
486 
487 		if (acpi_irqlist_find_irq(irqlistp, cur_irq, NULL)
488 		    == ACPI_PSM_SUCCESS) {
489 
490 			acpi_free_irqlist(irqlistp);
491 			ASSERT(pci_irqp != NULL);
492 			*pci_irqp = cur_irq;
493 			return (ACPI_PSM_SUCCESS);
494 		}
495 		XEN_UPPC_VERBOSE_IRQ((CE_WARN, "!xVM_uppc: Could not find the "
496 		    "current irq %d for device %s, instance #%d in ACPI's "
497 		    "list of possible irqs for this device. Picking one from "
498 		    " the latter list.", cur_irq, ddi_get_name(dip),
499 		    ddi_get_instance(dip)));
500 
501 	}
502 
503 	irqlistent = irqlistp;
504 	min_share = 255;
505 
506 	while (irqlistent != NULL) {
507 
508 		for (foundnow = 0, i = 0; i < irqlistent->num_irqs; i++) {
509 
510 			irq = irqlistp->irqs[i];
511 
512 			if ((irq > MAX_ISA_IRQ) ||
513 			    (irqlistent->intr_flags.intr_el == INTR_EL_EDGE) ||
514 			    (irq == 0))
515 				continue;
516 
517 			if (xen_uppc_reserved_irqlist[irq])
518 				continue;
519 
520 			if (xen_uppc_irq_shared_table[irq] == 0) {
521 				chosen_irq = irq;
522 				foundnow = 1;
523 				if (!(xen_uppc_prefer_crs) ||
524 				    (irq == cur_irq)) {
525 					done = 1;
526 					break;
527 				}
528 			}
529 
530 			if ((xen_uppc_irq_shared_table[irq] < min_share) ||
531 			    ((xen_uppc_irq_shared_table[irq] == min_share) &&
532 			    (cur_irq == irq) && (xen_uppc_prefer_crs))) {
533 				min_share = xen_uppc_irq_shared_table[irq];
534 				share_irq = irq;
535 				foundnow = 1;
536 			}
537 		}
538 
539 		/* If we found an IRQ in the inner loop, save the details */
540 		if (foundnow && ((chosen_irq != -1) || (share_irq != -1))) {
541 			/*
542 			 * Copy the acpi_prs_private_t and flags from this
543 			 * irq list entry, since we found an irq from this
544 			 * entry.
545 			 */
546 			acpipsmlnkp->acpi_prs_prv = irqlistent->acpi_prs_prv;
547 			*dipintr_flagp = irqlistent->intr_flags;
548 		}
549 
550 		if (done)
551 			break;
552 
553 		/* Load the next entry in the irqlist */
554 		irqlistent = irqlistent->next;
555 	}
556 
557 	acpi_free_irqlist(irqlistp);
558 
559 	if (chosen_irq != -1)
560 		irq = chosen_irq;
561 	else if (share_irq != -1)
562 		irq = share_irq;
563 	else {
564 		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: Could not find a "
565 		    "suitable irq from the list of possible irqs for device "
566 		    "%s, instance #%d in ACPI's list of possible\n",
567 		    ddi_get_name(dip), ddi_get_instance(dip)));
568 
569 		return (ACPI_PSM_FAILURE);
570 	}
571 
572 
573 	XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: Setting irq %d "
574 	    "for device %s instance #%d\n", irq, ddi_get_name(dip),
575 	    ddi_get_instance(dip)));
576 
577 	if ((acpi_set_irq_resource(acpipsmlnkp, irq)) == ACPI_PSM_SUCCESS) {
578 		/*
579 		 * setting irq was successful, check to make sure CRS
580 		 * reflects that. If CRS does not agree with what we
581 		 * set, return the irq that was set.
582 		 */
583 
584 		if (acpi_get_current_irq_resource(acpipsmlnkp, &cur_irq,
585 		    dipintr_flagp) == ACPI_PSM_SUCCESS) {
586 
587 			if (cur_irq != irq)
588 				XEN_UPPC_VERBOSE_IRQ((CE_WARN, "!xVM_uppc: "
589 				    "IRQ resource set (irqno %d) for device %s "
590 				    "instance #%d, differs from current "
591 				    "setting irqno %d",
592 				    irq, ddi_get_name(dip),
593 				    ddi_get_instance(dip), cur_irq));
594 		}
595 		/*
596 		 * return the irq that was set, and not what CRS reports,
597 		 * since CRS has been seen to be bogus on some systems
598 		 */
599 		cur_irq = irq;
600 	} else {
601 		XEN_UPPC_VERBOSE_IRQ((CE_WARN, "!xVM_uppc: set resource irq %d "
602 		    "failed for device %s instance #%d",
603 		    irq, ddi_get_name(dip), ddi_get_instance(dip)));
604 		if (cur_irq == -1)
605 			return (ACPI_PSM_FAILURE);
606 	}
607 
608 	ASSERT(pci_irqp != NULL);
609 	*pci_irqp = cur_irq;
610 	return (ACPI_PSM_SUCCESS);
611 }
612 
613 
/*
 * Determine the irq for a PCI device interrupt pin (busid/devid/ipin):
 * first consult the ACPI irq cache, then acpi_translate_pci_irq().
 * An interrupt link device (ACPI_PSM_PARTIAL with a valid lnkobj) is
 * configured through xen_uppc_acpi_irq_configure().  On success the
 * result is cached and the irq is switched to PCI (level) mode in the
 * ELCR.  Returns an ACPI_PSM_* status; *pci_irqp and *intr_flagp are
 * filled in on success.
 */
static int
xen_uppc_acpi_translate_pci_irq(dev_info_t *dip, int busid, int devid,
    int ipin, int *pci_irqp, iflag_t *intr_flagp)
{
	int status;
	acpi_psm_lnk_t acpipsmlnk;

	if ((status = acpi_get_irq_cache_ent(busid, devid, ipin, pci_irqp,
	    intr_flagp)) == ACPI_PSM_SUCCESS) {
		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: Found irqno %d "
		    "from cache for device %s, instance #%d\n", *pci_irqp,
		    ddi_get_name(dip), ddi_get_instance(dip)));
		return (status);
	}

	bzero(&acpipsmlnk, sizeof (acpi_psm_lnk_t));

	if ((status = acpi_translate_pci_irq(dip, ipin, pci_irqp,
	    intr_flagp, &acpipsmlnk)) == ACPI_PSM_FAILURE) {
		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: "
		    " acpi_translate_pci_irq failed for device %s, instance"
		    " #%d\n", ddi_get_name(dip), ddi_get_instance(dip)));

		return (status);
	}

	/* interrupt link device: pick and program an irq for it */
	if (status == ACPI_PSM_PARTIAL && acpipsmlnk.lnkobj != NULL) {
		status = xen_uppc_acpi_irq_configure(&acpipsmlnk, dip, pci_irqp,
		    intr_flagp);
		if (status != ACPI_PSM_SUCCESS) {
			status = acpi_get_current_irq_resource(&acpipsmlnk,
			    pci_irqp, intr_flagp);
		}
	}

	if (status == ACPI_PSM_SUCCESS) {
		acpi_new_irq_cache_ent(busid, devid, ipin, *pci_irqp,
		    intr_flagp, &acpipsmlnk);
		psm_set_elcr(*pci_irqp, 1); 	/* set IRQ to PCI mode */

		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: [ACPI] "
		    "new irq %d for device %s, instance #%d\n",
		    *pci_irqp, ddi_get_name(dip), ddi_get_instance(dip)));
	}

	return (status);
}
661 
662 
/*
 * psm_translate_irq entry point: map a device's irqno to the irq that
 * will actually be used.  With ACPI enabled, PCI/PCIe devices are
 * translated through ACPI (possibly programming an interrupt link
 * device and updating the device's ILINE register); non-PCI devices
 * have their irq set to ISA (edge) mode in the ELCR.  On any failure
 * irqno is returned unchanged.
 */
/*ARGSUSED*/
static int
xen_uppc_translate_irq(dev_info_t *dip, int irqno)
{
	char dev_type[16];
	int dev_len, pci_irq, devid, busid;
	ddi_acc_handle_t cfg_handle;
	uchar_t ipin, iline;
	iflag_t intr_flag;

	if (dip == NULL) {
		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: irqno = %d"
		    " dip = NULL\n", irqno));
		return (irqno);
	}

	if (!xen_uppc_enable_acpi) {
		return (irqno);
	}

	dev_len = sizeof (dev_type);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ddi_get_parent(dip),
	    DDI_PROP_DONTPASS, "device_type", (caddr_t)dev_type,
	    &dev_len) != DDI_PROP_SUCCESS) {
		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: irqno %d"
		    " device %s instance %d no device_type\n", irqno,
		    ddi_get_name(dip), ddi_get_instance(dip)));
		return (irqno);
	}

	if ((strcmp(dev_type, "pci") == 0) ||
	    (strcmp(dev_type, "pciex") == 0)) {

		/* pci device */
		if (acpica_get_bdf(dip, &busid, &devid, NULL) != 0)
			return (irqno);

		if (pci_config_setup(dip, &cfg_handle) != DDI_SUCCESS)
			return (irqno);

		/* interrupt pin (0-based) and BIOS-programmed line */
		ipin = pci_config_get8(cfg_handle, PCI_CONF_IPIN) - PCI_INTA;
		iline = pci_config_get8(cfg_handle, PCI_CONF_ILINE);
		if (xen_uppc_acpi_translate_pci_irq(dip, busid, devid,
		    ipin, &pci_irq, &intr_flag) == ACPI_PSM_SUCCESS) {

			XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: [ACPI] "
			    "new irq %d old irq %d device %s, instance %d\n",
			    pci_irq, irqno, ddi_get_name(dip),
			    ddi_get_instance(dip)));

			/*
			 * Make sure pci_irq is within range.
			 * Otherwise, fall through and return irqno.
			 */
			if (pci_irq <= MAX_ISA_IRQ) {
				if (iline != pci_irq) {
					/*
					 * Update the device's ILINE byte,
					 * in case uppc_acpi_translate_pci_irq
					 * has chosen a different pci_irq
					 * than the BIOS has configured.
					 * Some chipsets use the value in
					 * ILINE to control interrupt routing,
					 * in conflict with the PCI spec.
					 */
					pci_config_put8(cfg_handle,
					    PCI_CONF_ILINE, pci_irq);
				}
				pci_config_teardown(&cfg_handle);
				return (pci_irq);
			}
		}
		pci_config_teardown(&cfg_handle);

		/* FALLTHRU to common case - returning irqno */
	} else {
		/* non-PCI; assumes ISA-style edge-triggered */
		psm_set_elcr(irqno, 0); 	/* set IRQ to ISA mode */

		XEN_UPPC_VERBOSE_IRQ((CE_CONT, "!xVM_uppc: non-pci,"
		    "irqno %d device %s instance %d\n", irqno,
		    ddi_get_name(dip), ddi_get_instance(dip)));
	}

	return (irqno);
}
749 
750 /*
751  * xen_uppc_intr_enter() acks the event that triggered the interrupt and
 * returns the new priority level.
753  */
/*ARGSUSED*/
static int
xen_uppc_intr_enter(int ipl, int *vector)
{
	int newipl;
	uint_t intno;
	cpu_t *cpu = CPU;

	intno = (*vector);

	ASSERT(intno < NR_IRQS);
	/* upcalls must be masked while we are handling one */
	ASSERT(cpu->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask != 0);

	/* ack: clear the pending state of this event channel */
	ec_clear_irq(intno);

	/* highest priority among the autovect handlers for this vector */
	newipl = autovect[intno].avh_hi_pri;
	if (newipl == 0) {
		/*
		 * (newipl == 0) means we have no service routines for this
		 * vector.  We will treat this as a spurious interrupt.
		 * We have cleared the pending bit already, clear the event
		 * mask and return a spurious interrupt.  This case can happen
		 * when an interrupt delivery is racing with the removal
		 * of the service routine for that interrupt.
		 */
		ec_unmask_irq(intno);
		newipl = -1;	/* flag spurious interrupt */
	} else if (newipl <= cpu->cpu_pri) {
		/*
		 * (newipl <= cpu->cpu_pri) means that we must be trying to
		 * service a vector that was shared with a higher priority
		 * isr.  The higher priority handler has been removed and
		 * we need to service this int.  We can't return a lower
		 * priority than current cpu priority.  Just synthesize a
		 * priority to return that should be acceptable.
		 */
		newipl = cpu->cpu_pri + 1;	/* synthetic priority */
	}
	return (newipl);
}
794 
795 
796 static void xen_uppc_setspl(int);
797 
798 /*
799  * xen_uppc_intr_exit() restores the old interrupt
800  * priority level after processing an interrupt.
801  * It is called with interrupts disabled, and does not enable interrupts.
802  */
/* ARGSUSED */
static void
xen_uppc_intr_exit(int ipl, int vector)
{
	/* unmask the event channel if it is safe to do so */
	ec_try_unmask_irq(vector);
	/* drop back to the pre-interrupt priority level */
	xen_uppc_setspl(ipl);
}
810 
/* Hand the generic interrupt code this PSM's interrupt-exit routine. */
intr_exit_fn_t
psm_intr_exit_fn(void)
{
	return (xen_uppc_intr_exit);
}
816 
817 /*
818  * Check if new ipl level allows delivery of previously unserviced events
819  */
static void
xen_uppc_setspl(int ipl)
{
	struct cpu *cpu = CPU;
	volatile vcpu_info_t *vci = cpu->cpu_m.mcpu_vcpu_info;
	uint16_t pending;

	/* upcalls must currently be masked */
	ASSERT(vci->evtchn_upcall_mask != 0);

	/*
	 * If new ipl level will enable any pending interrupts, setup so the
	 * upcoming sti will cause us to get an upcall.
	 */
	pending = cpu->cpu_m.mcpu_intr_pending & ~((1 << (ipl + 1)) - 1);
	if (pending) {
		int i;
		ulong_t pending_sels = 0;
		volatile ulong_t *selp;
		struct xen_evt_data *cpe = cpu->cpu_m.mcpu_evt_pend;

		/* gather the deferred selector bits for all levels > ipl */
		for (i = bsrw_insn(pending); i > ipl; i--)
			pending_sels |= cpe->pending_sel[i];
		ASSERT(pending_sels);
		/* re-arm the vcpu upcall so those events get redelivered */
		selp = (volatile ulong_t *)&vci->evtchn_pending_sel;
		atomic_or_ulong(selp, pending_sels);
		vci->evtchn_upcall_pending = 1;
	}
}
848 
849 /*
850  * The rest of the file is just generic psm module boilerplate
851  */

/*
 * psm_ops dispatch vector: wires the xen_uppc_* handlers into the
 * generic PSM framework; operations this PSM does not implement
 * are NULL.
 */
static struct psm_ops xen_uppc_ops = {
	xen_uppc_probe,				/* psm_probe		*/

	xen_uppc_softinit,			/* psm_init		*/
	xen_uppc_picinit,			/* psm_picinit		*/
	xen_uppc_intr_enter,			/* psm_intr_enter	*/
	xen_uppc_intr_exit,			/* psm_intr_exit	*/
	xen_uppc_setspl,			/* psm_setspl		*/
	xen_uppc_addspl,			/* psm_addspl		*/
	xen_uppc_delspl,			/* psm_delspl		*/
	(int (*)(processorid_t))NULL,		/* psm_disable_intr	*/
	(void (*)(processorid_t))NULL,		/* psm_enable_intr	*/
	(int (*)(int))NULL,			/* psm_softlvl_to_irq	*/
	(void (*)(int))NULL,			/* psm_set_softintr	*/
	(void (*)(processorid_t))NULL,		/* psm_set_idlecpu	*/
	(void (*)(processorid_t))NULL,		/* psm_unset_idlecpu	*/

	xen_uppc_clkinit,			/* psm_clkinit		*/
	xen_uppc_get_clockirq,			/* psm_get_clockirq	*/
	(void (*)(void))NULL,			/* psm_hrtimeinit	*/
	xpv_gethrtime,				/* psm_gethrtime	*/

	xen_uppc_get_next_processorid,		/* psm_get_next_processorid */
	(int (*)(processorid_t, caddr_t))NULL,	/* psm_cpu_start	*/
	(int (*)(void))NULL,			/* psm_post_cpu_start	*/
	xen_uppc_shutdown,			/* psm_shutdown		*/
	(int (*)(int, int))NULL,		/* psm_get_ipivect	*/
	(void (*)(processorid_t, int))NULL,	/* psm_send_ipi		*/

	xen_uppc_translate_irq,			/* psm_translate_irq	*/

	(void (*)(int, char *))NULL,		/* psm_notify_error	*/
	(void (*)(int msg))NULL,		/* psm_notify_func	*/
	xen_uppc_timer_reprogram,		/* psm_timer_reprogram	*/
	xen_uppc_timer_enable,			/* psm_timer_enable	*/
	xen_uppc_timer_disable,			/* psm_timer_disable	*/
	(void (*)(void *arg))NULL,		/* psm_post_cyclic_setup */
	(void (*)(int, int))NULL,		/* psm_preshutdown	*/

	(int (*)(dev_info_t *, ddi_intr_handle_impl_t *,
	    psm_intr_op_t, int *))NULL,		/* psm_intr_ops		*/
	(int (*)(psm_state_request_t *))NULL,	/* psm_state		*/
	(int (*)(psm_cpu_request_t *))NULL	/* psm_cpu_ops		*/
};

/*
 * Module identification handed to psm_mod_init(); the machine name
 * ("xVM_uppc") is also passed to acpi_psm_init() as p_mach_idstring.
 */
static struct psm_info xen_uppc_info = {
	PSM_INFO_VER01_5,	/* version				*/
	PSM_OWN_SYS_DEFAULT,	/* ownership				*/
	&xen_uppc_ops,		/* operation				*/
	"xVM_uppc",		/* machine name				*/
	"UniProcessor PC"	/* machine descriptions			*/
};
905 
/* Opaque handle returned by psm_mod_init(), used by _fini and _info. */
static void *xen_uppc_hdlp;

int
_init(void)
{
	/* register this PSM with the generic psm module framework */
	return (psm_mod_init(&xen_uppc_hdlp, &xen_uppc_info));
}
913 
int
_fini(void)
{
	/* unregister from the psm module framework */
	return (psm_mod_fini(&xen_uppc_hdlp, &xen_uppc_info));
}
919 
int
_info(struct modinfo *modinfop)
{
	/* report module information via the psm framework */
	return (psm_mod_info(&xen_uppc_hdlp, &xen_uppc_info, modinfop));
}
925