/*
 * General MIPS MT support routines, usable in AP/SP and SMVP.
 * Copyright (C) 2005 Mips Technologies, Inc
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/security.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>

int vpelimit;

static int __init maxvpes(char *str)
{
	get_option(&str, &vpelimit);

	return 1;
}

__setup("maxvpes=", maxvpes);

int tclimit;

static int __init maxtcs(char *str)
{
	get_option(&str, &tclimit);

	return 1;
}

__setup("maxtcs=", maxtcs);

/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
 */

void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;
	unsigned long vpflags;
	unsigned long mvpconf0;
	int nvpe;
	int ntc;
	int i;
	int tc;
	unsigned long haltval;
	unsigned long tcstatval;

	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk(" MVPControl Passed: %08lx\n", mvpctl);
	printk(" MVPControl Read: %08lx\n", vpflags);
	printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for (i = 0; i < nvpe; i++) {
		for (tc = 0; tc < ntc; tc++) {
			settc(tc);
			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
				printk(" VPE %d\n", i);
				printk(" VPEControl : %08lx\n",
				       read_vpe_c0_vpecontrol());
				printk(" VPEConf0 : %08lx\n",
				       read_vpe_c0_vpeconf0());
				printk(" VPE%d.Status : %08lx\n",
				       i, read_vpe_c0_status());
				printk(" VPE%d.EPC : %08lx %pS\n",
				       i, read_vpe_c0_epc(),
				       (void *) read_vpe_c0_epc());
				printk(" VPE%d.Cause : %08lx\n",
				       i, read_vpe_c0_cause());
				printk(" VPE%d.Config7 : %08lx\n",
				       i, read_vpe_c0_config7());
				break; /* Next VPE */
			}
		}
	}
	printk("-- per-TC State --\n");
	for (tc = 0; tc < ntc; tc++) {
		settc(tc);
		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
			/* Are we dumping ourself? */
			haltval = 0; /* Then we're not halted, and mustn't be */
			tcstatval = flags; /* And pre-dump TCStatus is flags */
			printk(" TC %d (current TC with VPE EPC above)\n", tc);
		} else {
			haltval = read_tc_c0_tchalt();
			write_tc_c0_tchalt(1);
			tcstatval = read_tc_c0_tcstatus();
			printk(" TC %d\n", tc);
		}
		printk(" TCStatus : %08lx\n", tcstatval);
		printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
		printk(" TCRestart : %08lx %pS\n",
		       read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
		printk(" TCHalt : %08lx\n", haltval);
		printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
		if (!haltval)
			write_tc_c0_tchalt(0);
	}
	printk("===========================\n");
	evpe(vpflags);
	local_irq_restore(flags);
}

static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;

static int __init rps_disable(char *s)
{
	mt_opt_norps = 1;
	return 1;
}
__setup("norps", rps_disable);

static int __init rpsctl_set(char *str)
{
	get_option(&str, &mt_opt_rpsctl);
	return 1;
}
__setup("rpsctl=", rpsctl_set);

static int __init nblsu_set(char *str)
{
	get_option(&str, &mt_opt_nblsu);
	return 1;
}
__setup("nblsu=", nblsu_set);

static int __init config7_set(char *str)
{
	get_option(&str, &mt_opt_config7);
	mt_opt_forceconfig7 = 1;
	return 1;
}
__setup("config7=", config7_set);

/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush;
int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;

static int __init set_protiflush(char *s)
{
	mt_protiflush = 1;
	return 1;
}
__setup("protiflush", set_protiflush);

static int __init set_protdflush(char *s)
{
	mt_protdflush = 1;
	return 1;
}
__setup("protdflush", set_protdflush);

static int __init niflush(char *s)
{
	get_option(&s, &mt_n_iflushes);
	return 1;
}
__setup("niflush=", niflush);

static int __init ndflush(char *s)
{
	get_option(&s, &mt_n_dflushes);
	return 1;
}
__setup("ndflush=", ndflush);

static unsigned int itc_base;

static int __init set_itc_base(char *str)
{
	get_option(&str, &itc_base);
	return 1;
}

__setup("itcbase=", set_itc_base);

void mips_mt_set_cpuoptions(void)
{
	unsigned int oconfig7 = read_c0_config7();
	unsigned int nconfig7 = oconfig7;

	if (mt_opt_norps) {
		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
	}
	if (mt_opt_rpsctl >= 0) {
		printk("34K return prediction stack override set to %d.\n",
		       mt_opt_rpsctl);
		if (mt_opt_rpsctl)
			nconfig7 |= (1 << 2);
		else
			nconfig7 &= ~(1 << 2);
	}
	if (mt_opt_nblsu >= 0) {
		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
		if (mt_opt_nblsu)
			nconfig7 |= (1 << 5);
		else
			nconfig7 &= ~(1 << 5);
	}
	if (mt_opt_forceconfig7) {
		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
		nconfig7 = mt_opt_config7;
	}
	if (oconfig7 != nconfig7) {
		__asm__ __volatile("sync");
		write_c0_config7(nconfig7);
		ehb();
		printk("Config7: 0x%08x\n", read_c0_config7());
	}

	/* Report Cache management debug options */
	if (mt_protiflush)
		printk("I-cache flushes single-threaded\n");
	if (mt_protdflush)
		printk("D-cache flushes single-threaded\n");
	if (mt_n_iflushes != 1)
		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
	if (mt_n_dflushes != 1)
		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);

	if (itc_base != 0) {
		/*
		 * Configure ITC mapping. This code is very
		 * specific to the 34K core family, which uses
		 * a special mode bit ("ITC") in the ErrCtl
		 * register to enable access to ITC control
		 * registers via cache "tag" operations.
		 */
		unsigned long ectlval;
		unsigned long itcblkgrn;

		/* ErrCtl register is known as "ecc" to Linux */
		ectlval = read_c0_ecc();
		write_c0_ecc(ectlval | (0x1 << 26));
		ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
		/* Read "cache tag" for Dcache pseudo-index 8 */
		cache_op(Index_Load_Tag_D, INDEX_8);
		ehb();
		itcblkgrn = read_c0_dtaglo();
		itcblkgrn &= 0xfffe0000;
		/* Set for 128 byte pitch of ITC cells */
		itcblkgrn |= 0x00000c00;
		/* Stage in Tag register */
		write_c0_dtaglo(itcblkgrn);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_8);
		/* Now set base address, and turn ITC on with 0x1 bit */
		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_0);
		write_c0_ecc(ectlval);
		ehb();
		printk("Mapped %ld ITC cells starting at 0x%08x\n",
		       ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
	}
}

/*
 * Function to protect cache flushes from concurrent execution
 * depends on MP software model chosen.
 */

void mt_cflush_lockdown(void)
{
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

void mt_cflush_release(void)
{
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

struct class *mt_class;

static int __init mt_init(void)
{
	struct class *mtc;

	mtc = class_create(THIS_MODULE, "mt");
	if (IS_ERR(mtc))
		return PTR_ERR(mtc);

	mt_class = mtc;

	return 0;
}

subsys_initcall(mt_init);
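
/*
 * Illustrative sketch only, not part of the file's actual logic: one way
 * the empty mt_cflush_lockdown()/mt_cflush_release() stubs above *could*
 * be filled in for a VSMP-style model, serializing cache flushes behind a
 * spinlock whenever the "protiflush"/"protdflush" options request
 * single-threaded flushes.  The lock name is hypothetical and the block is
 * compiled out so it cannot clash with the real (empty) definitions.
 */
#if 0
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(mt_cflush_lock);

void mt_cflush_lockdown(void)
{
	/* Only serialize when a flush-protection option was requested */
	if (mt_protiflush || mt_protdflush)
		spin_lock(&mt_cflush_lock);
}

void mt_cflush_release(void)
{
	if (mt_protiflush || mt_protdflush)
		spin_unlock(&mt_cflush_lock);
}
#endif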