/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

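/*
 * Per-CPU sun4v interrupt and error queue support: allocation of the
 * cpu/dev mondo queues and the resumable/non-resumable error queues,
 * their registration with the hypervisor, and their teardown.
 */
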
#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/intreg.h>
#include <sys/machcpuvar.h>
#include <vm/hat_sfmmu.h>
#include <sys/error.h>
#include <sys/hypervisor_api.h>

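/*
 * Register this CPU's four queues with the hypervisor.  Each
 * hv_cpu_qconf() call hands the hypervisor the queue's base real
 * address and its size in entries; there is no way to recover from
 * a failure here, so we panic.
 */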
void
cpu_intrq_register(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	uint64_t ret;

	ret = hv_cpu_qconf(INTR_CPU_Q, mcpup->cpu_q_base_pa, cpu_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: cpu_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(INTR_DEV_Q, mcpup->dev_q_base_pa, dev_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: dev_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_RQ, mcpup->cpu_rq_base_pa, cpu_rq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: resumable error queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_NRQ, mcpup->cpu_nrq_base_pa, cpu_nrq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: non-resumable error queue "
		    "configuration failed, error %lu", cpu->cpu_id, ret);
}

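/*
 * Allocate this CPU's mondo data and cpu list for cross-calls, plus
 * the four sun4v queues, and cache each buffer's physical/real
 * address in the machcpu so the low-level handlers never have to
 * translate it again.
 */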
void
cpu_intrq_setup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	int cpu_list_size;
	uint64_t cpu_q_size;
	uint64_t dev_q_size;
	uint64_t cpu_rq_size;
	uint64_t cpu_nrq_size;

	/*
	 * Allocate mondo data for xcalls.
	 */
	mcpup->mondo_data = contig_mem_alloc(INTR_REPORT_SIZE);
	if (mcpup->mondo_data == NULL)
		cmn_err(CE_PANIC, "cpu%d: cpu mondo_data allocation failed",
		    cpu->cpu_id);

	/*
	 * Allocate a per-CPU list of NCPU cpu IDs for xcalls.
	 */
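	/* The allocation must be at least INTR_REPORT_SIZE bytes. */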
	cpu_list_size = NCPU * sizeof (uint16_t);
	if (cpu_list_size < INTR_REPORT_SIZE)
		cpu_list_size = INTR_REPORT_SIZE;

	mcpup->cpu_list = contig_mem_alloc(cpu_list_size);
	if (mcpup->cpu_list == NULL)
		cmn_err(CE_PANIC, "cpu%d: cpu cpu_list allocation failed",
		    cpu->cpu_id);
	mcpup->cpu_list_ra = va_to_pa(mcpup->cpu_list);

	/*
	 * va_to_pa() is too expensive to call for every crosscall,
	 * so we do it once here at init time and save the result in
	 * the machcpu structure.
	 */
	mcpup->mondo_data_ra = va_to_pa(mcpup->mondo_data);

	/*
	 * Allocate sun4v interrupt and error queues.
	 */
	cpu_q_size = cpu_q_entries * INTR_REPORT_SIZE;
	mcpup->cpu_q_va = contig_mem_alloc(cpu_q_size);
	if (mcpup->cpu_q_va == NULL)
		cmn_err(CE_PANIC, "cpu%d: cpu intrq allocation failed",
		    cpu->cpu_id);
	mcpup->cpu_q_base_pa = va_to_pa(mcpup->cpu_q_va);
	mcpup->cpu_q_size = cpu_q_size;

	dev_q_size = dev_q_entries * INTR_REPORT_SIZE;
	mcpup->dev_q_va = contig_mem_alloc(dev_q_size);
	if (mcpup->dev_q_va == NULL)
		cmn_err(CE_PANIC, "cpu%d: dev intrq allocation failed",
		    cpu->cpu_id);
	mcpup->dev_q_base_pa = va_to_pa(mcpup->dev_q_va);
	mcpup->dev_q_size = dev_q_size;

	/* Allocate resumable queue and its kernel buffer */
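	/*
	 * Twice the queue size is allocated below: the first half is
	 * the queue proper, which is handed to the hypervisor; the
	 * second half serves as the kernel buffer mentioned above.
	 */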
	cpu_rq_size = cpu_rq_entries * Q_ENTRY_SIZE;
	mcpup->cpu_rq_va = contig_mem_alloc(2 * cpu_rq_size);
	if (mcpup->cpu_rq_va == NULL)
		cmn_err(CE_PANIC, "cpu%d: resumable queue allocation failed",
		    cpu->cpu_id);
	mcpup->cpu_rq_base_pa = va_to_pa(mcpup->cpu_rq_va);
	mcpup->cpu_rq_size = cpu_rq_size;
	/* zero out the memory */
	bzero(mcpup->cpu_rq_va, 2 * cpu_rq_size);

	/* Allocate non-resumable queue and its kernel buffer */
	cpu_nrq_size = cpu_nrq_entries * Q_ENTRY_SIZE;
	mcpup->cpu_nrq_va = contig_mem_alloc(2 * cpu_nrq_size);
	if (mcpup->cpu_nrq_va == NULL)
		cmn_err(CE_PANIC, "cpu%d: non-resumable queue "
		    "allocation failed", cpu->cpu_id);
	mcpup->cpu_nrq_base_pa = va_to_pa(mcpup->cpu_nrq_va);
	mcpup->cpu_nrq_size = cpu_nrq_size;
	/* zero out the memory */
	bzero(mcpup->cpu_nrq_va, 2 * cpu_nrq_size);
}

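/*
 * Free everything allocated by cpu_intrq_setup() and clear the
 * cached physical/real addresses.
 */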
void
cpu_intrq_cleanup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	int cpu_list_size;
	uint64_t cpu_q_size;
	uint64_t dev_q_size;
	uint64_t cpu_rq_size;
	uint64_t cpu_nrq_size;

	/*
	 * Free mondo data for xcalls.
	 */
	if (mcpup->mondo_data) {
		contig_mem_free(mcpup->mondo_data, INTR_REPORT_SIZE);
		mcpup->mondo_data = NULL;
		mcpup->mondo_data_ra = 0;
	}

	/*
	 * Free the per-CPU list used for xcalls.
	 */
	cpu_list_size = NCPU * sizeof (uint16_t);
	if (cpu_list_size < INTR_REPORT_SIZE)
		cpu_list_size = INTR_REPORT_SIZE;

	if (mcpup->cpu_list) {
		contig_mem_free(mcpup->cpu_list, cpu_list_size);
		mcpup->cpu_list = NULL;
		mcpup->cpu_list_ra = 0;
	}

	/*
	 * Free sun4v interrupt and error queues.
	 */
	if (mcpup->cpu_q_va) {
		cpu_q_size = cpu_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->cpu_q_va, cpu_q_size);
		mcpup->cpu_q_va = NULL;
		mcpup->cpu_q_base_pa = 0;
		mcpup->cpu_q_size = 0;
	}

	if (mcpup->dev_q_va) {
		dev_q_size = dev_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->dev_q_va, dev_q_size);
		mcpup->dev_q_va = NULL;
		mcpup->dev_q_base_pa = 0;
		mcpup->dev_q_size = 0;
	}

	if (mcpup->cpu_rq_va) {
		cpu_rq_size = cpu_rq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_rq_va, 2 * cpu_rq_size);
		mcpup->cpu_rq_va = NULL;
		mcpup->cpu_rq_base_pa = 0;
		mcpup->cpu_rq_size = 0;
	}

	if (mcpup->cpu_nrq_va) {
		cpu_nrq_size = cpu_nrq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_nrq_va, 2 * cpu_nrq_size);
		mcpup->cpu_nrq_va = NULL;
		mcpup->cpu_nrq_base_pa = 0;
		mcpup->cpu_nrq_size = 0;
	}
}