// SPDX-License-Identifier: GPL-2.0-only
/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>

/*
 * Current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than calling of_get_cpu_node().
 * If we implement cpu hotplug, we'll have to install an appropriate
 * notifier in order to release references to the cpu going away.
 */
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	struct cbe_pmd_shadow_regs pmd_shadow_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_regs_map *regs;
	unsigned int thread_id;
	unsigned int cbe_id;
} cbe_thread_map[NR_CPUS];

static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };

static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;
	struct device_node *tmp_np;

	if (!of_node_is_type(np, "spe")) {
		for (i = 0; i < cbe_regs_map_count; i++)
			if (cbe_regs_maps[i].cpu_node == np ||
			    cbe_regs_maps[i].be_node == np)
				return &cbe_regs_maps[i];
		return NULL;
	}

	if (np->data)
		return np->data;

	/* walk up the path until a cpu or be node is found */
	tmp_np = np;
	do {
		tmp_np = tmp_np->parent;
		/* on a correct devicetree we won't get up to the root */
		BUG_ON(!tmp_np);
	} while (!of_node_is_type(tmp_np, "cpu") &&
		 !of_node_is_type(tmp_np, "be"));

	np->data = cbe_find_map(tmp_np);

	return np->data;
}

struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);

struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);

struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);

u32 cbe_get_hw_thread_id(int cpu)
{
	return cbe_thread_map[cpu].thread_id;
}
EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);

u32 cbe_cpu_to_node(int cpu)
{
	return cbe_thread_map[cpu].cbe_id;
}
EXPORT_SYMBOL_GPL(cbe_cpu_to_node);

u32 cbe_node_to_cpu(int node)
{
	return cpumask_first(&cbe_local_mask[node]);
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
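/*
 * Minimal, illustrative usage sketch of the per-CPU accessors above
 * (hypothetical caller, not a consumer in this file). Callers must
 * tolerate a NULL return, since a CPU may have no register map yet,
 * e.g. before cbe_regs_init() has run:
 *
 *	struct cbe_pmd_regs __iomem *pmd_regs;
 *
 *	pmd_regs = cbe_get_cpu_pmd_regs(smp_processor_id());
 *	if (!pmd_regs)
 *		return;
 *	... actual register accesses would then use in_be64()/out_be64()
 *	    on fields of struct cbe_pmd_regs (see <asm/cell-regs.h>) ...
 */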
static struct device_node *__init cbe_get_be_node(int cpu_id)
{
	struct device_node *np;

	for_each_node_by_type(np, "be") {
		int len, i;
		const phandle *cpu_handle;

		cpu_handle = of_get_property(np, "cpus", &len);

		/*
		 * the CAB SLOF tree is non compliant, so we just assume
		 * there is only one node
		 */
		if (WARN_ON_ONCE(!cpu_handle))
			return np;

		/* "cpus" is a list of phandles; len is in bytes */
		for (i = 0; i < len / sizeof(*cpu_handle); i++) {
			struct device_node *ch_np = of_find_node_by_phandle(cpu_handle[i]);
			struct device_node *ci_np = of_get_cpu_node(cpu_id, NULL);

			of_node_put(ch_np);
			of_node_put(ci_np);

			if (ch_np == ci_np)
				return np;
		}
	}

	return NULL;
}

static void __init cbe_fill_regs_map(struct cbe_regs_map *map)
{
	if (map->be_node) {
		struct device_node *be, *np, *parent_np;

		be = map->be_node;

		for_each_node_by_type(np, "pervasive") {
			parent_np = of_get_parent(np);
			if (parent_np == be)
				map->pmd_regs = of_iomap(np, 0);
			of_node_put(parent_np);
		}

		for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") {
			parent_np = of_get_parent(np);
			if (parent_np == be)
				map->iic_regs = of_iomap(np, 2);
			of_node_put(parent_np);
		}

		for_each_node_by_type(np, "mic-tm") {
			parent_np = of_get_parent(np);
			if (parent_np == be)
				map->mic_tm_regs = of_iomap(np, 0);
			of_node_put(parent_np);
		}
	} else {
		struct device_node *cpu;
		/* That hack must die die die ! */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		cpu = map->cpu_node;

		prop = of_get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "mic-tm", NULL);
		if (prop != NULL)
			map->mic_tm_regs = ioremap(prop->address, prop->len);
	}
}
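/*
 * Illustrative device tree fragment (an assumption for documentation, not
 * copied from real firmware) of the layout cbe_get_be_node() and
 * cbe_fill_regs_map() expect: a "be" node carrying a "cpus" phandle list,
 * with children whose device_type and "reg" entries feed of_iomap():
 *
 *	be@... {
 *		device_type = "be";
 *		cpus = <&cpu0>;
 *
 *		pervasive@... {
 *			device_type = "pervasive";
 *			reg = < ... >;		reg index 0 -> pmd_regs
 *		};
 *		iic@... {
 *			device_type = "CBEA-Internal-Interrupt-Controller";
 *			reg = < ... >;		reg index 2 -> iic_regs
 *		};
 *		mic-tm@... {
 *			device_type = "mic-tm";
 *			reg = < ... >;		reg index 0 -> mic_tm_regs
 *		};
 *	};
 */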
void __init cbe_regs_init(void)
{
	int i;
	unsigned int thread_id;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i) {
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
		cbe_thread_map[i].be_node = cbe_get_be_node(i);
		cbe_thread_map[i].thread_id = thread_id;
	}

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map;
		unsigned int cbe_id;

		cbe_id = cbe_regs_map_count++;
		map = &cbe_regs_maps[cbe_id];

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			of_node_put(cpu);
			return;
		}
		of_node_put(map->cpu_node);
		map->cpu_node = of_node_get(cpu);

		for_each_possible_cpu(i) {
			struct cbe_thread_map *thread = &cbe_thread_map[i];

			if (thread->cpu_node == cpu) {
				thread->regs = map;
				thread->cbe_id = cbe_id;
				map->be_node = thread->be_node;
				cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
				if (thread->thread_id == 0)
					cpumask_set_cpu(i, &cbe_first_online_cpu);
			}
		}

		cbe_fill_regs_map(map);
	}
}
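/*
 * Illustrative sketch of how the node/CPU mapping built above is meant to
 * be consumed (hypothetical loop, not a consumer in this file): per-BE
 * users can act once per Cell chip by comparing each CPU against the
 * first CPU of its node:
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		if (cpu != cbe_node_to_cpu(cbe_cpu_to_node(cpu)))
 *			continue;	skip all but the first thread of a BE
 *		... program per-chip state via cbe_get_cpu_*_regs(cpu) ...
 *	}
 */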