xref: /linux/arch/arm64/kernel/smp_spin_table.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Spin Table SMP initialisation
 *
 * Copyright (C) 2013 ARM Ltd.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/cputype.h>
#include <asm/io.h>
#include <asm/smp_plat.h>

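/*
 * secondary_holding_pen is the low-level holding pen (in head.S) in which
 * secondary CPUs spin until they are released. secondary_holding_pen_release
 * holds the MPIDR of the CPU being released; it lives in .mmuoff.data.read
 * because secondaries read it before their MMU and caches are enabled.
 */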
extern void secondary_holding_pen(void);
volatile unsigned long __section(".mmuoff.data.read")
secondary_holding_pen_release = INVALID_HWID;

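/*
 * Physical address of each CPU's release mailbox, taken from the
 * devicetree "cpu-release-addr" property in smp_spin_table_cpu_init().
 */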
static phys_addr_t cpu_release_addr[NR_CPUS];

/*
 * Write secondary_holding_pen_release in a way that is guaranteed to be
 * visible to all observers, irrespective of whether they're taking part
 * in coherency or not.  This is necessary for the hotplug code to work
 * reliably.
 */
static void write_pen_release(u64 val)
{
	void *start = (void *)&secondary_holding_pen_release;
	unsigned long size = sizeof(secondary_holding_pen_release);

	secondary_holding_pen_release = val;
	dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size);
}

static int smp_spin_table_cpu_init(unsigned int cpu)
{
	struct device_node *dn;
	int ret;

	dn = of_get_cpu_node(cpu, NULL);
	if (!dn)
		return -ENODEV;

	/*
	 * Determine the address from which the CPU is polling.
	 */
	ret = of_property_read_u64(dn, "cpu-release-addr",
				   &cpu_release_addr[cpu]);
	if (ret)
		pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
		       cpu);

	of_node_put(dn);

	return ret;
}
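
/*
 * For illustration only: a devicetree CPU node that selects this
 * enable-method and supplies the mailbox address read above might look
 * like the following (the release address is an example, not a fixed value):
 *
 *	cpu@1 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x1>;
 *		enable-method = "spin-table";
 *		cpu-release-addr = <0x0 0x8000fff8>;
 *	};
 */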

static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
	__le64 __iomem *release_addr;
	phys_addr_t pa_holding_pen = __pa_symbol(secondary_holding_pen);

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	/*
	 * The cpu-release-addr may or may not be inside the linear mapping.
	 * As ioremap_cache will either give us a new mapping or reuse the
	 * existing linear mapping, we can use it to cover both cases. In
	 * either case the memory will be MT_NORMAL.
	 */
	release_addr = ioremap_cache(cpu_release_addr[cpu],
				     sizeof(*release_addr));
	if (!release_addr)
		return -ENOMEM;

	/*
	 * We write the release address as LE regardless of the native
	 * endianness of the kernel. Therefore, any boot-loaders that
	 * read this address need to convert this address to the
	 * boot-loader's endianness before jumping. This is mandated by
	 * the boot protocol.
	 */
	writeq_relaxed(pa_holding_pen, release_addr);
	dcache_clean_inval_poc((__force unsigned long)release_addr,
			    (__force unsigned long)release_addr +
				    sizeof(*release_addr));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	iounmap(release_addr);

	return 0;
}

static int smp_spin_table_cpu_boot(unsigned int cpu)
{
	/*
	 * Update the pen release flag with this CPU's MPIDR. The secondary
	 * spinning in secondary_holding_pen compares the released value
	 * against its own MPIDR and exits the pen once they match.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send an event, causing the secondaries to read pen_release.
	 */
	sev();

	return 0;
}

const struct cpu_operations smp_spin_table_ops = {
	.name		= "spin-table",
	.cpu_init	= smp_spin_table_cpu_init,
	.cpu_prepare	= smp_spin_table_cpu_prepare,
	.cpu_boot	= smp_spin_table_cpu_boot,
};