// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BIOS run time interface routines.
 *
 *  Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 *  Copyright (c) Russ Anderson <rja@sgi.com>
 */

#include <linux/efi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/efi.h>
#include <linux/io.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv_hub.h>

unsigned long uv_systab_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;

struct uv_systab *uv_systab;

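/*
 * Low-level UV BIOS call. Callers are expected to hold
 * __efi_uv_runtime_lock (see the wrappers below), which serializes UV
 * BIOS calls against EFI runtime services calls.
 */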
static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
			u64 a4, u64 a5)
{
	struct uv_systab *tab = uv_systab;
	s64 ret;

	if (!tab || !tab->function)
		/*
		 * BIOS does not support UV systab
		 */
		return BIOS_STATUS_UNIMPLEMENTED;

	/*
	 * If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI
	 * callback method, which uses efi_call() directly, with the kernel page tables:
	 */
	if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) {
		kernel_fpu_begin();
		ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
		kernel_fpu_end();
	} else {
		ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
	}

	return ret;
}

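/*
 * Make a UV BIOS call. Takes __efi_uv_runtime_lock and may therefore
 * sleep; returns BIOS_STATUS_ABORT if the wait for the lock is
 * interrupted.
 */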
static s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4,
		u64 a5)
{
	s64 ret;

	if (down_interruptible(&__efi_uv_runtime_lock))
		return BIOS_STATUS_ABORT;

	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
	up(&__efi_uv_runtime_lock);

	return ret;
}

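/*
 * As uv_bios_call(), but with local interrupts disabled around the BIOS
 * call itself.
 */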
static s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
		u64 a4, u64 a5)
{
	unsigned long bios_flags;
	s64 ret;

	if (down_interruptible(&__efi_uv_runtime_lock))
		return BIOS_STATUS_ABORT;

	local_irq_save(bios_flags);
	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
	local_irq_restore(bios_flags);

	up(&__efi_uv_runtime_lock);

	return ret;
}

long sn_partition_id;
EXPORT_SYMBOL_GPL(sn_partition_id);
long sn_coherency_id;
long sn_region_size;
EXPORT_SYMBOL_GPL(sn_region_size);
long system_serial_number;
int uv_type;

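/*
 * Query the BIOS for partition information: hub type, partition id,
 * coherence id, region size and system serial number. Any of the output
 * pointers may be NULL if the caller is not interested in that field.
 */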
s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
		long *region, long *ssn)
{
	s64 ret;
	u64 v0, v1;
	union partition_info_u part;

	ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
				(u64)(&v0), (u64)(&v1), 0, 0);
	if (ret != BIOS_STATUS_SUCCESS)
		return ret;

	part.val = v0;
	if (uvtype)
		*uvtype = part.hub_version;
	if (partid)
		*partid = part.partition_id;
	if (coher)
		*coher = part.coherence_id;
	if (region)
		*region = part.region_size;
	if (ssn)
		*ssn = v1;
	return ret;
}

int
uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
			   unsigned long *intr_mmr_offset)
{
	u64 watchlist;
	s64 ret;

	/*
	 * BIOS returns the watchlist number or a negative error number.
	 */
	ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
			mq_size, (u64)intr_mmr_offset,
			(u64)&watchlist, 0);
	if (ret < BIOS_STATUS_SUCCESS)
		return ret;

	return watchlist;
}
EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);

int
uv_bios_mq_watchlist_free(int blade, int watchlist_num)
{
	return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE,
				blade, watchlist_num, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);

s64
uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms)
{
	return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len,
					perms, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);

s64
uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
{
	return uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie,
				    (u64)addr, buf, (u64)len, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa);

s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
{
	return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
			   (u64)ticks_per_second, 0, 0, 0);
}
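
/*
 * Typical use (sketch): a clock driver would read the RTC base frequency
 * roughly like this, with BIOS_FREQ_BASE_REALTIME_CLOCK taken from
 * <asm/uv/bios.h>:
 *
 *	u64 ticks_per_sec;
 *
 *	if (uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
 *			      &ticks_per_sec) != BIOS_STATUS_SUCCESS)
 *		return -ENODEV;
 */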

/*
 * uv_bios_set_legacy_vga_target - Set Legacy VGA I/O Target
 * @decode: true to enable target, false to disable target
 * @domain: PCI domain number
 * @bus: PCI bus number
 *
 * Returns:
 *    0: Success
 *    -EINVAL: Invalid domain or bus number
 *    -ENOSYS: Capability not available
 *    -EBUSY: Legacy VGA I/O cannot be retargeted at this time
 */
int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
{
	return uv_bios_call(UV_BIOS_SET_LEGACY_VGA_TARGET,
				(u64)decode, (u64)domain, (u64)bus, 0, 0);
}

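/*
 * Locate and map the UV system table (UVsystab) that the BIOS publishes
 * through the EFI configuration table. Starting with UV4 the table size
 * is variable, so the table is remapped using the size found in its
 * header.
 */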
int uv_bios_init(void)
{
	uv_systab = NULL;
	if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) ||
	    !uv_systab_phys || efi_runtime_disabled()) {
		pr_crit("UV: UVsystab: missing\n");
		return -EEXIST;
	}

	uv_systab = ioremap(uv_systab_phys, sizeof(struct uv_systab));
	if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
		pr_err("UV: UVsystab: bad signature!\n");
		iounmap(uv_systab);
		return -EINVAL;
	}

	/* Starting with UV4 the UV systab size is variable */
	if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
		int size = uv_systab->size;

		iounmap(uv_systab);
		uv_systab = ioremap(uv_systab_phys, size);
		if (!uv_systab) {
			pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
			return -EFAULT;
		}
	}
	pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
	return 0;
}

static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}

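/*
 * Undo efi_uv1_memmap_phys_prolog(): free the page tables that were
 * allocated for the transient 1:1 mapping and restore the saved kernel
 * PGD entries.
 */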
void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx, i;
	int nr_pgds;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

		if (!pgd_present(*pgd))
			continue;

		for (i = 0; i < PTRS_PER_P4D; i++) {
			p4d = p4d_offset(pgd,
					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

			if (!p4d_present(*p4d))
				continue;

			pud = (pud_t *)p4d_page_vaddr(*p4d);
			pud_free(&init_mm, pud);
		}

		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		p4d_free(&init_mm, p4d);
	}

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}

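/*
 * Build a transient 1:1 (physical == virtual) mapping of memory below
 * max_pfn so that the UV1 BIOS can be called through efi_call() with
 * physical addresses. Returns the saved kernel PGD entries, which the
 * caller must later hand back to efi_uv1_memmap_phys_epilog().
 */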
pgd_t * __init efi_uv1_memmap_phys_prolog(void)
{
	unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
	pgd_t *save_pgd, *pgd_k, *pgd_efi;
	p4d_t *p4d, *p4d_k, *p4d_efi;
	pud_t *pud;

	int pgd;
	int n_pgds, i, j;

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
	if (!save_pgd)
		return NULL;

	/*
	 * Build a 1:1 identity mapping for UV1 memmap usage. Note that
	 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
	 * it is PUD_SIZE aligned with KASLR enabled. So for a given physical
	 * address X, pud_index(X) != pud_index(__va(X)) in general, and we
	 * can only copy the PUD entry of __va(X) to fill in the PUD entry of
	 * X to build the 1:1 mapping. This means we can only reuse the PMD
	 * tables of the direct mapping here.
	 */
	for (pgd = 0; pgd < n_pgds; pgd++) {
		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
		pgd_efi = pgd_offset_k(addr_pgd);
		save_pgd[pgd] = *pgd_efi;

		p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
		if (!p4d) {
			pr_err("Failed to allocate p4d table!\n");
			goto out;
		}

		for (i = 0; i < PTRS_PER_P4D; i++) {
			addr_p4d = addr_pgd + i * P4D_SIZE;
			p4d_efi = p4d + p4d_index(addr_p4d);

			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
			if (!pud) {
				pr_err("Failed to allocate pud table!\n");
				goto out;
			}

			for (j = 0; j < PTRS_PER_PUD; j++) {
				addr_pud = addr_p4d + j * PUD_SIZE;

				if (addr_pud > (max_pfn << PAGE_SHIFT))
					break;

				vaddr = (unsigned long)__va(addr_pud);

				pgd_k = pgd_offset_k(vaddr);
				p4d_k = p4d_offset(pgd_k, vaddr);
				pud[j] = *pud_offset(p4d_k, vaddr);
			}
		}
		pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
	}

	__flush_tlb_all();
	return save_pgd;
out:
	efi_uv1_memmap_phys_epilog(save_pgd);
	return NULL;
}

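/*
 * Map an EFI runtime region for the EFI_UV1_MEMMAP case: MMIO regions
 * are ioremap()ed, while RAM-backed regions are added to the kernel's
 * direct mapping (set uncached if the region is not EFI_MEMORY_WB) and
 * accessed through their __va() address.
 */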
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
					   PAGE_KERNEL);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}

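/*
 * "efi=old_map" on the kernel command line selects the UV1 memmap mode
 * (physical EFI callbacks) unless the firmware is running in mixed mode.
 */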
static int __init arch_parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (!efi_is_mixed() && parse_option_str(str, "old_map"))
		set_bit(EFI_UV1_MEMMAP, &efi.flags);

	return 0;
}
early_param("efi", arch_parse_efi_cmdline);
373