/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/intreg.h>
#include <sys/machcpuvar.h>
#include <vm/hat_sfmmu.h>
#include <sys/error.h>
#include <sys/hypervisor_api.h>

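/*
 * Register this CPU's mondo interrupt and error queues with the
 * hypervisor.  Each queue is described by its type, the real address
 * of its base, and its number of entries.  A CPU cannot take mondo
 * interrupts or error reports without its queues, so any failure to
 * configure one is fatal.
 */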
void
cpu_intrq_register(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	uint64_t ret;

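	/*
	 * hv_cpu_qconf(queue, base_ra, nentries) is the wrapper for the
	 * hypervisor's cpu_qconf call (see sys/hypervisor_api.h); it
	 * returns H_EOK on success.
	 */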
	ret = hv_cpu_qconf(INTR_CPU_Q, mcpup->cpu_q_base_pa, cpu_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: cpu_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(INTR_DEV_Q, mcpup->dev_q_base_pa, dev_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: dev_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_RQ, mcpup->cpu_rq_base_pa, cpu_rq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: resumable error queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_NRQ, mcpup->cpu_nrq_base_pa, cpu_nrq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: non-resumable error queue "
		    "configuration failed, error %lu", cpu->cpu_id, ret);
}

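/*
 * Allocate this CPU's mondo data, cpu list, and sun4v interrupt and
 * error queues from physically contiguous memory, and record each
 * block's real address, since the hypervisor and the low-level mondo
 * code address these blocks by RA rather than VA.  Returns 0 on
 * success and ENOMEM if any allocation fails.
 */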
int
cpu_intrq_setup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	size_t size;

	/*
	 * This routine returns an error if any contig_mem_alloc() fails.
	 * The caller is expected to call cpu_intrq_cleanup() (or
	 * cleanup_cpu_common(), which does so), which cleanly frees only
	 * those blocks that were successfully allocated.
	 */

	/*
	 * Allocate mondo data for xcalls.
	 */
	mcpup->mondo_data = contig_mem_alloc(INTR_REPORT_SIZE);

	if (mcpup->mondo_data == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu mondo_data allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	/*
	 * va_to_pa() is too expensive to call for every crosscall
	 * so we do it here at init time and save it in machcpu.
	 */
	mcpup->mondo_data_ra = va_to_pa(mcpup->mondo_data);

	/*
	 * Allocate a per-CPU list of NCPU entries (uint16_t CPU ids)
	 * for xcalls.
	 */
	size = NCPU * sizeof (uint16_t);
	if (size < INTR_REPORT_SIZE)
		size = INTR_REPORT_SIZE;
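	/*
	 * Assumption: the clamp above pads the allocation to at least one
	 * full INTR_REPORT_SIZE block, so the list handed to the
	 * hypervisor by real address never spans less than one
	 * report-sized unit.
	 */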

	mcpup->cpu_list = contig_mem_alloc(size);

	if (mcpup->cpu_list == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu cpu_list allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_list_ra = va_to_pa(mcpup->cpu_list);

	/*
	 * Allocate sun4v interrupt and error queues.
	 */
	size = cpu_q_entries * INTR_REPORT_SIZE;

	mcpup->cpu_q_va = contig_mem_alloc(size);

	if (mcpup->cpu_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_q_base_pa = va_to_pa(mcpup->cpu_q_va);
	mcpup->cpu_q_size = size;

	/*
	 * Allocate device queues.
	 */
	size = dev_q_entries * INTR_REPORT_SIZE;

	mcpup->dev_q_va = contig_mem_alloc(size);

	if (mcpup->dev_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: dev intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->dev_q_base_pa = va_to_pa(mcpup->dev_q_va);
	mcpup->dev_q_size = size;

	/*
	 * Allocate the resumable error queue and its kernel buffer.
	 */
	size = cpu_rq_entries * Q_ENTRY_SIZE;

	mcpup->cpu_rq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_rq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: resumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_rq_base_pa = va_to_pa(mcpup->cpu_rq_va);
	mcpup->cpu_rq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_rq_va, 2 * size);
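	/*
	 * Only the first half (cpu_rq_entries worth) is registered with
	 * the hypervisor by cpu_intrq_register(); the second half is the
	 * kernel buffer noted above.  The non-resumable queue below is
	 * laid out the same way.
	 */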

	/*
	 * Allocate the non-resumable error queue and its kernel buffer.
	 */
	size = cpu_nrq_entries * Q_ENTRY_SIZE;

	mcpup->cpu_nrq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_nrq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: nonresumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_nrq_base_pa = va_to_pa(mcpup->cpu_nrq_va);
	mcpup->cpu_nrq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_nrq_va, 2 * size);

	return (0);
}

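/*
 * Free the per-CPU mondo data, cpu list, and queue memory allocated by
 * cpu_intrq_setup().  Each pointer is checked before freeing so that a
 * partially completed setup can be cleaned up safely; sizes are
 * recomputed from the same globals used at allocation time.
 */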
void
cpu_intrq_cleanup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	int cpu_list_size;
	uint64_t cpu_q_size;
	uint64_t dev_q_size;
	uint64_t cpu_rq_size;
	uint64_t cpu_nrq_size;

	/*
	 * Free mondo data for xcalls.
	 */
	if (mcpup->mondo_data) {
		contig_mem_free(mcpup->mondo_data, INTR_REPORT_SIZE);
		mcpup->mondo_data = NULL;
		mcpup->mondo_data_ra = NULL;
	}

	/*
	 * Free the per-CPU list of NCPU entries for xcalls, using the
	 * same padded size computed at allocation time.
	 */
	cpu_list_size = NCPU * sizeof (uint16_t);
	if (cpu_list_size < INTR_REPORT_SIZE)
		cpu_list_size = INTR_REPORT_SIZE;

	if (mcpup->cpu_list) {
		contig_mem_free(mcpup->cpu_list, cpu_list_size);
		mcpup->cpu_list = NULL;
		mcpup->cpu_list_ra = NULL;
	}

	/*
	 * Free sun4v interrupt and error queues.
	 */
	if (mcpup->cpu_q_va) {
		cpu_q_size = cpu_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->cpu_q_va, cpu_q_size);
		mcpup->cpu_q_va = NULL;
		mcpup->cpu_q_base_pa = NULL;
		mcpup->cpu_q_size = 0;
	}

	if (mcpup->dev_q_va) {
		dev_q_size = dev_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->dev_q_va, dev_q_size);
		mcpup->dev_q_va = NULL;
		mcpup->dev_q_base_pa = NULL;
		mcpup->dev_q_size = 0;
	}

	if (mcpup->cpu_rq_va) {
		cpu_rq_size = cpu_rq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_rq_va, 2 * cpu_rq_size);
		mcpup->cpu_rq_va = NULL;
		mcpup->cpu_rq_base_pa = NULL;
		mcpup->cpu_rq_size = 0;
	}

	if (mcpup->cpu_nrq_va) {
		cpu_nrq_size = cpu_nrq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_nrq_va, 2 * cpu_nrq_size);
		mcpup->cpu_nrq_va = NULL;
		mcpup->cpu_nrq_base_pa = NULL;
		mcpup->cpu_nrq_size = 0;
	}
}
243