1 /* 2 * arch/s390/kernel/early.c 3 * 4 * Copyright IBM Corp. 2007 5 * Author(s): Hongjie Yang <hongjie@us.ibm.com>, 6 * Heiko Carstens <heiko.carstens@de.ibm.com> 7 */ 8 9 #include <linux/init.h> 10 #include <linux/errno.h> 11 #include <linux/string.h> 12 #include <linux/ctype.h> 13 #include <linux/lockdep.h> 14 #include <linux/module.h> 15 #include <linux/pfn.h> 16 #include <linux/uaccess.h> 17 #include <asm/lowcore.h> 18 #include <asm/processor.h> 19 #include <asm/sections.h> 20 #include <asm/setup.h> 21 #include <asm/cpcmd.h> 22 #include <asm/sclp.h> 23 24 /* 25 * Create a Kernel NSS if the SAVESYS= parameter is defined 26 */ 27 #define DEFSYS_CMD_SIZE 96 28 #define SAVESYS_CMD_SIZE 32 29 30 char kernel_nss_name[NSS_NAME_SIZE + 1]; 31 32 #ifdef CONFIG_SHARED_KERNEL 33 static noinline __init void create_kernel_nss(void) 34 { 35 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; 36 #ifdef CONFIG_BLK_DEV_INITRD 37 unsigned int sinitrd_pfn, einitrd_pfn; 38 #endif 39 int response; 40 char *savesys_ptr; 41 char upper_command_line[COMMAND_LINE_SIZE]; 42 char defsys_cmd[DEFSYS_CMD_SIZE]; 43 char savesys_cmd[SAVESYS_CMD_SIZE]; 44 45 /* Do nothing if we are not running under VM */ 46 if (!MACHINE_IS_VM) 47 return; 48 49 /* Convert COMMAND_LINE to upper case */ 50 for (i = 0; i < strlen(COMMAND_LINE); i++) 51 upper_command_line[i] = toupper(COMMAND_LINE[i]); 52 53 savesys_ptr = strstr(upper_command_line, "SAVESYS="); 54 55 if (!savesys_ptr) 56 return; 57 58 savesys_ptr += 8; /* Point to the beginning of the NSS name */ 59 for (i = 0; i < NSS_NAME_SIZE; i++) { 60 if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0') 61 break; 62 kernel_nss_name[i] = savesys_ptr[i]; 63 } 64 65 stext_pfn = PFN_DOWN(__pa(&_stext)); 66 eshared_pfn = PFN_DOWN(__pa(&_eshared)); 67 end_pfn = PFN_UP(__pa(&_end)); 68 min_size = end_pfn << 2; 69 70 sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X", 71 kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1, 72 
eshared_pfn, end_pfn); 73 74 #ifdef CONFIG_BLK_DEV_INITRD 75 if (INITRD_START && INITRD_SIZE) { 76 sinitrd_pfn = PFN_DOWN(__pa(INITRD_START)); 77 einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE)); 78 min_size = einitrd_pfn << 2; 79 sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd, 80 sinitrd_pfn, einitrd_pfn); 81 } 82 #endif 83 84 sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size); 85 sprintf(savesys_cmd, "SAVESYS %s \n IPL %s", 86 kernel_nss_name, kernel_nss_name); 87 88 __cpcmd(defsys_cmd, NULL, 0, &response); 89 90 if (response != 0) 91 return; 92 93 __cpcmd(savesys_cmd, NULL, 0, &response); 94 95 if (response != strlen(savesys_cmd)) 96 return; 97 98 ipl_flags = IPL_NSS_VALID; 99 } 100 101 #else /* CONFIG_SHARED_KERNEL */ 102 103 static inline void create_kernel_nss(void) { } 104 105 #endif /* CONFIG_SHARED_KERNEL */ 106 107 /* 108 * Clear bss memory 109 */ 110 static noinline __init void clear_bss_section(void) 111 { 112 memset(__bss_start, 0, _end - __bss_start); 113 } 114 115 /* 116 * Initialize storage key for kernel pages 117 */ 118 static noinline __init void init_kernel_storage_key(void) 119 { 120 unsigned long end_pfn, init_pfn; 121 122 end_pfn = PFN_UP(__pa(&_end)); 123 124 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++) 125 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); 126 } 127 128 static noinline __init void detect_machine_type(void) 129 { 130 struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; 131 132 asm volatile("stidp %0" : "=m" (S390_lowcore.cpu_data.cpu_id)); 133 134 /* Running under z/VM ? */ 135 if (cpuinfo->cpu_id.version == 0xff) 136 machine_flags |= 1; 137 138 /* Running on a P/390 ? 
*/ 139 if (cpuinfo->cpu_id.machine == 0x7490) 140 machine_flags |= 4; 141 } 142 143 static noinline __init int memory_fast_detect(void) 144 { 145 146 unsigned long val0 = 0; 147 unsigned long val1 = 0xc; 148 int ret = -ENOSYS; 149 150 if (ipl_flags & IPL_NSS_VALID) 151 return -ENOSYS; 152 153 asm volatile( 154 " diag %1,%2,0x260\n" 155 "0: lhi %0,0\n" 156 "1:\n" 157 EX_TABLE(0b,1b) 158 : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc"); 159 160 if (ret || val0 != val1) 161 return -ENOSYS; 162 163 memory_chunk[0].size = val0; 164 return 0; 165 } 166 167 #define ADDR2G (1UL << 31) 168 169 static noinline __init unsigned long sclp_memory_detect(void) 170 { 171 struct sclp_readinfo_sccb *sccb; 172 unsigned long long memsize; 173 174 sccb = &s390_readinfo_sccb; 175 176 if (sccb->header.response_code != 0x10) 177 return 0; 178 179 if (sccb->rnsize) 180 memsize = sccb->rnsize << 20; 181 else 182 memsize = sccb->rnsize2 << 20; 183 if (sccb->rnmax) 184 memsize *= sccb->rnmax; 185 else 186 memsize *= sccb->rnmax2; 187 #ifndef CONFIG_64BIT 188 /* 189 * Can't deal with more than 2G in 31 bit addressing mode, so 190 * limit the value in order to avoid strange side effects. 191 */ 192 if (memsize > ADDR2G) 193 memsize = ADDR2G; 194 #endif 195 return (unsigned long) memsize; 196 } 197 198 static inline __init unsigned long __tprot(unsigned long addr) 199 { 200 int cc = -1; 201 202 asm volatile( 203 " tprot 0(%1),0\n" 204 "0: ipm %0\n" 205 " srl %0,28\n" 206 "1:\n" 207 EX_TABLE(0b,1b) 208 : "+d" (cc) : "a" (addr) : "cc"); 209 return (unsigned long)cc; 210 } 211 212 /* Checking memory in 128KB increments. 
 */
#define CHUNK_INCR	(1UL << 17)

/*
 * Detect usable memory by probing every 128KB step with __tprot() and
 * recording each maximal run with the same protection state into
 * memory_chunk[]. memsize (from SCLP, 0 = unknown) bounds the scan and,
 * when non-zero, lets us skip over holes (e.g. the hsa) because we know
 * more memory must follow.
 */
static noinline __init void find_memory_chunks(unsigned long memsize)
{
	unsigned long addr = 0, old_addr = 0;
	unsigned long old_cc = CHUNK_READ_WRITE;
	unsigned long cc;
	int chunk = 0;

	while (chunk < MEMORY_CHUNKS) {
		cc = __tprot(addr);
		/* Advance while the protection state stays the same. */
		while (cc == old_cc) {
			addr += CHUNK_INCR;
			cc = __tprot(addr);
#ifndef CONFIG_64BIT
			if (addr == ADDR2G)
				break;
#endif
		}

		/* Record the run [old_addr, addr) if it was accessible. */
		if (old_addr != addr &&
		    (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
			memory_chunk[chunk].addr = old_addr;
			memory_chunk[chunk].size = addr - old_addr;
			memory_chunk[chunk].type = old_cc;
			chunk++;
		}

		old_addr = addr;
		old_cc = cc;

#ifndef CONFIG_64BIT
		if (addr == ADDR2G)
			break;
#endif
		/*
		 * Finish memory detection at the first hole, unless
		 * - we reached the hsa -> skip it.
		 * - we know there must be more.
		 */
		if (cc == -1UL && !memsize && old_addr != ADDR2G)
			break;
		if (memsize && addr >= memsize)
			break;
	}
}

/*
 * Early program check handler: look up the failing address in the
 * exception tables (covers the probing instructions above) and resume at
 * the fixup address; stop in a disabled wait if there is no fixup.
 */
static __init void early_pgm_check_handler(void)
{
	unsigned long addr;
	const struct exception_table_entry *fixup;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
	if (!fixup)
		disabled_wait(0);
	/* Rewrite the old PSW so that execution resumes at the fixup. */
	S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}

/*
 * Install the early external and program-check new PSWs in the lowcore
 * and route program checks to early_pgm_check_handler().
 */
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
}

/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if
 * the SAVESYS= parm is defined
 */
void __init startup_init(void)
{
	unsigned long memsize;

	/*
	 * NOTE(review): the sequence below is strictly ordered — e.g.
	 * the bss (which holds kernel_nss_name) is cleared before
	 * create_kernel_nss() writes into it; presumably the IPL
	 * parameters must be captured before the bss wipe as well.
	 */
	ipl_save_parameters();
	clear_bss_section();
	init_kernel_storage_key();
	lockdep_init();
	lockdep_off();		/* keep lockdep quiet during early setup */
	detect_machine_type();
	create_kernel_nss();
	/* early_pgm_check_handler() needs a sorted exception table ... */
	sort_main_extable();
	/* ... before the early program check handler is installed. */
	setup_lowcore_early();
	sclp_readinfo_early();
	memsize = sclp_memory_detect();
	/* Fall back to the TPROT scan if diag 0x260 is not usable. */
	if (memory_fast_detect() < 0)
		find_memory_chunks(memsize);
	lockdep_on();
}