/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

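/*
 * Per-CPU setup of the sun4v interrupt and error queues: the cpu mondo
 * and dev mondo queues plus the resumable and non-resumable error
 * queues.  Each queue is allocated from contiguous memory and then
 * registered with the hypervisor via hv_cpu_qconf().
 */
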
#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/intreg.h>
#include <sys/machcpuvar.h>
#include <vm/hat_sfmmu.h>
#include <sys/error.h>
#include <sys/hypervisor_api.h>

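/*
 * Register this CPU's four queues with the hypervisor.  The real
 * addresses recorded by cpu_intrq_setup() and the global entry counts
 * are handed to hv_cpu_qconf(); any failure is fatal.
 */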
void
cpu_intrq_register(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	uint64_t ret;

	ret = hv_cpu_qconf(INTR_CPU_Q, mcpup->cpu_q_base_pa, cpu_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: cpu_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(INTR_DEV_Q, mcpup->dev_q_base_pa, dev_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: dev_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_RQ, mcpup->cpu_rq_base_pa, cpu_rq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: resumable error queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_NRQ, mcpup->cpu_nrq_base_pa, cpu_nrq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: non-resumable error queue "
		    "configuration failed, error %lu", cpu->cpu_id, ret);
}

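/*
 * Allocate the per-CPU mondo data, the xcall CPU list, and the four
 * interrupt/error queues, recording both the virtual and real address
 * of each in the machcpu.  Returns ENOMEM on the first allocation
 * failure; the caller is expected to invoke cpu_intrq_cleanup() to
 * free whatever was allocated.
 */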
int
cpu_intrq_setup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	size_t size;

	/*
	 * This routine will return with an error return if any
	 * contig_mem_alloc() fails.  It is expected that the caller will
	 * call cpu_intrq_cleanup() (or cleanup_cpu_common() which will).
	 * That will cleanly free only those blocks that were alloc'd.
	 */

	/*
	 * Allocate mondo data for xcalls.
	 */
	mcpup->mondo_data = contig_mem_alloc(INTR_REPORT_SIZE);

	if (mcpup->mondo_data == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu mondo_data allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	/*
	 * va_to_pa() is too expensive to call for every crosscall
	 * so we do it here at init time and save it in machcpu.
	 */
	mcpup->mondo_data_ra = va_to_pa(mcpup->mondo_data);

	/*
	 * Allocate a per-CPU list of NCPU entries for xcalls.
	 */
	size = NCPU * sizeof (uint16_t);
	if (size < INTR_REPORT_SIZE)
		size = INTR_REPORT_SIZE;

	mcpup->cpu_list = contig_mem_alloc(size);

	if (mcpup->cpu_list == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu cpu_list allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_list_ra = va_to_pa(mcpup->cpu_list);

	/*
	 * Allocate sun4v interrupt and error queues.
	 */
	size = cpu_q_entries * INTR_REPORT_SIZE;

	mcpup->cpu_q_va = contig_mem_alloc(size);

	if (mcpup->cpu_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_q_base_pa = va_to_pa(mcpup->cpu_q_va);
	mcpup->cpu_q_size = size;

	/*
	 * Allocate the device mondo queue.
	 */
	size = dev_q_entries * INTR_REPORT_SIZE;

	mcpup->dev_q_va = contig_mem_alloc(size);

	if (mcpup->dev_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: dev intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->dev_q_base_pa = va_to_pa(mcpup->dev_q_va);
	mcpup->dev_q_size = size;

	/*
	 * Allocate resumable queue and its kernel buffer.
	 */
	size = cpu_rq_entries * Q_ENTRY_SIZE;

	mcpup->cpu_rq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_rq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: resumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_rq_base_pa = va_to_pa(mcpup->cpu_rq_va);
	mcpup->cpu_rq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_rq_va, 2 * size);

	/*
	 * Allocate non-resumable queue and its kernel buffer.
	 */
	size = cpu_nrq_entries * Q_ENTRY_SIZE;

	mcpup->cpu_nrq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_nrq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: nonresumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_nrq_base_pa = va_to_pa(mcpup->cpu_nrq_va);
	mcpup->cpu_nrq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_nrq_va, 2 * size);

	return (0);
}

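/*
 * Undo cpu_intrq_setup(): free whichever of the per-CPU buffers were
 * successfully allocated and clear the corresponding machcpu fields.
 * Safe to call after a partially completed setup.
 */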
void
cpu_intrq_cleanup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	int cpu_list_size;
	uint64_t cpu_q_size;
	uint64_t dev_q_size;
	uint64_t cpu_rq_size;
	uint64_t cpu_nrq_size;

	/*
	 * Free mondo data for xcalls.
	 */
	if (mcpup->mondo_data) {
		contig_mem_free(mcpup->mondo_data, INTR_REPORT_SIZE);
		mcpup->mondo_data = NULL;
		mcpup->mondo_data_ra = NULL;
	}

	/*
	 * Free the per-CPU list of NCPU entries for xcalls.
	 */
	cpu_list_size = NCPU * sizeof (uint16_t);
	if (cpu_list_size < INTR_REPORT_SIZE)
		cpu_list_size = INTR_REPORT_SIZE;

	if (mcpup->cpu_list) {
		contig_mem_free(mcpup->cpu_list, cpu_list_size);
		mcpup->cpu_list = NULL;
		mcpup->cpu_list_ra = NULL;
	}

	/*
	 * Free sun4v interrupt and error queues.
	 */
	if (mcpup->cpu_q_va) {
		cpu_q_size = cpu_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->cpu_q_va, cpu_q_size);
		mcpup->cpu_q_va = NULL;
		mcpup->cpu_q_base_pa = NULL;
		mcpup->cpu_q_size = 0;
	}

	if (mcpup->dev_q_va) {
		dev_q_size = dev_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->dev_q_va, dev_q_size);
		mcpup->dev_q_va = NULL;
		mcpup->dev_q_base_pa = NULL;
		mcpup->dev_q_size = 0;
	}

	if (mcpup->cpu_rq_va) {
		cpu_rq_size = cpu_rq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_rq_va, 2 * cpu_rq_size);
		mcpup->cpu_rq_va = NULL;
		mcpup->cpu_rq_base_pa = NULL;
		mcpup->cpu_rq_size = 0;
	}

	if (mcpup->cpu_nrq_va) {
		cpu_nrq_size = cpu_nrq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_nrq_va, 2 * cpu_nrq_size);
		mcpup->cpu_nrq_va = NULL;
		mcpup->cpu_nrq_base_pa = NULL;
		mcpup->cpu_nrq_size = 0;
	}
}