// SPDX-License-Identifier: GPL-1.0+
/*
 * zcore module to export memory content and register sets for creating system
 * dumps on SCSI/NVMe disks (zfcp/nvme dump).
 *
 * For more information please refer to Documentation/arch/s390/zfcpdump.rst
 *
 * Copyright IBM Corp. 2003, 2008
 * Author(s): Michael Holzheu
 */

#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/uio.h>

#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include <asm/os_info.h>
#include <asm/maccess.h>
#include "sclp.h"

#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)

enum arch_id {
	ARCH_S390	= 0,
	ARCH_S390X	= 1,
};

struct ipib_info {
	unsigned long	ipib;
	u32		checksum;
} __attribute__((packed));

static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
static unsigned long os_info_flags;

static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * Copy memory from HSA to iterator (not reentrant):
 *
 * @iter:  Iterator where memory should be copied to
 * @src:   Start address within HSA where data should be copied
 * @count: Size of buffer, which should be copied
 */
size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
	size_t bytes, copied, res = 0;
	unsigned long offset;

	if (!hsa_available)
		return 0;

	mutex_lock(&hsa_buf_mutex);
	while (count) {
		if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			break;
		}
		offset = src % PAGE_SIZE;
		bytes = min(PAGE_SIZE - offset, count);
		copied = copy_to_iter(hsa_buf + offset, bytes, iter);
		count -= copied;
		src += copied;
		res += copied;
		if (copied < bytes)
			break;
	}
	mutex_unlock(&hsa_buf_mutex);
	return res;
}

/*
 * Copy memory from HSA to kernel memory (not reentrant):
 *
 * @dst:   Kernel buffer where memory should be copied to
 * @src:   Start address within HSA where data should be copied
 * @count: Size of buffer, which should be copied
 */
static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
{
	struct iov_iter iter;
	struct kvec kvec;

	kvec.iov_base = dst;
	kvec.iov_len = count;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
	if (memcpy_hsa_iter(&iter, src, count) < count)
		return -EIO;
	return 0;
}

static int __init init_cpu_info(void)
{
	struct save_area *sa;

	/* get info for boot cpu from lowcore, stored in the HSA */
	sa = save_area_boot_cpu();
	if (!sa)
		return -ENOMEM;
	if (memcpy_hsa_kernel(hsa_buf, __LC_FPREGS_SAVE_AREA, 512) < 0) {
		TRACE("could not copy from HSA\n");
		return -EIO;
	}
	save_area_add_regs(sa, hsa_buf); /* vx registers are saved in smp.c */
	return 0;
}

/*
 * Release the HSA
 */
static void release_hsa(void)
{
	diag308(DIAG308_REL_HSA, NULL);
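	/* No further HSA reads are possible once the area has been released */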
	hsa_available = 0;
}

static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	if (zcore_ipl_block) {
		diag308(DIAG308_SET, zcore_ipl_block);
		if (os_info_flags & OS_INFO_FLAG_REIPL_CLEAR)
			diag308(DIAG308_LOAD_CLEAR, NULL);
		/* Use special diag308 subcode for CCW normal ipl */
		if (zcore_ipl_block->pb0_hdr.pbt == IPL_PBT_CCW)
			diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
		else
			diag308(DIAG308_LOAD_NORMAL, NULL);
	}
	return count;
}

static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
	return stream_open(inode, filp);
}

static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations zcore_reipl_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_reipl_write,
	.open		= zcore_reipl_open,
	.release	= zcore_reipl_release,
	.llseek		= no_llseek,
};

static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *ppos)
{
	static char str[18];

	if (hsa_available)
		snprintf(str, sizeof(str), "%lx\n", sclp.hsa_size);
	else
		snprintf(str, sizeof(str), "0\n");
	return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
}

static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	char value;

	if (*ppos != 0)
		return -EPIPE;
	if (copy_from_user(&value, buf, 1))
		return -EFAULT;
	if (value != '0')
		return -EINVAL;
	release_hsa();
	return count;
}

static const struct file_operations zcore_hsa_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_hsa_write,
	.read		= zcore_hsa_read,
	.open		= nonseekable_open,
	.llseek		= no_llseek,
};

static int __init check_sdias(void)
{
	if (!sclp.hsa_size) {
		TRACE("Could not determine HSA size\n");
		return -ENODEV;
	}
	return 0;
}

/*
 * Provide IPL parameter information block from either HSA or memory
 * for future reipl
 */
static int __init zcore_reipl_init(void)
{
	struct os_info_entry *entry;
	struct ipib_info ipib_info;
	unsigned long os_info_addr;
	struct os_info *os_info;
	int rc;

	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
	if (rc)
		return rc;
	if (ipib_info.ipib == 0)
		return 0;
	zcore_ipl_block = (void *) __get_free_page(GFP_KERNEL);
	if (!zcore_ipl_block)
		return -ENOMEM;
	if (ipib_info.ipib < sclp.hsa_size)
		rc = memcpy_hsa_kernel(zcore_ipl_block, ipib_info.ipib,
				       PAGE_SIZE);
	else
		rc = memcpy_real(zcore_ipl_block, ipib_info.ipib, PAGE_SIZE);
	if (rc || (__force u32)csum_partial(zcore_ipl_block, zcore_ipl_block->hdr.len, 0) !=
	    ipib_info.checksum) {
		TRACE("Checksum does not match\n");
		free_page((unsigned long) zcore_ipl_block);
		zcore_ipl_block = NULL;
	}
	/*
	 * Read the bit-flags field from os_info flags entry.
	 * Return zero even for os_info read or entry checksum errors in order
	 * to continue dump processing, considering that os_info could be
	 * corrupted on the panicked system.
	 */
	os_info = (void *)__get_free_page(GFP_KERNEL);
	if (!os_info)
		return -ENOMEM;
	rc = memcpy_hsa_kernel(&os_info_addr, __LC_OS_INFO, sizeof(os_info_addr));
	if (rc)
		goto out;
	if (os_info_addr < sclp.hsa_size)
		rc = memcpy_hsa_kernel(os_info, os_info_addr, PAGE_SIZE);
	else
		rc = memcpy_real(os_info, os_info_addr, PAGE_SIZE);
	if (rc || os_info_csum(os_info) != os_info->csum)
		goto out;
	entry = &os_info->entry[OS_INFO_FLAGS_ENTRY];
	if (entry->addr && entry->size) {
		if (entry->addr < sclp.hsa_size)
			rc = memcpy_hsa_kernel(&os_info_flags, entry->addr, sizeof(os_info_flags));
		else
			rc = memcpy_real(&os_info_flags, entry->addr, sizeof(os_info_flags));
		if (rc || (__force u32)csum_partial(&os_info_flags, entry->size, 0) != entry->csum)
			os_info_flags = 0;
	}
out:
	free_page((unsigned long)os_info);
	return 0;
}

static int zcore_reboot_and_on_panic_handler(struct notifier_block *self,
					     unsigned long event,
					     void *data)
{
	if (hsa_available)
		release_hsa();

	return NOTIFY_OK;
}

static struct notifier_block zcore_reboot_notifier = {
	.notifier_call	= zcore_reboot_and_on_panic_handler,
	/* we need to be notified before reipl and kdump */
	.priority	= INT_MAX,
};

static struct notifier_block zcore_on_panic_notifier = {
	.notifier_call	= zcore_reboot_and_on_panic_handler,
	/* we need to be notified before reipl and kdump */
	.priority	= INT_MAX,
};

static int __init zcore_init(void)
{
	unsigned char arch;
	int rc;

	if (!is_ipl_type_dump())
		return -ENODATA;
	if (oldmem_data.start)
		return -ENODATA;

	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
	debug_register_view(zcore_dbf, &debug_sprintf_view);
	debug_set_level(zcore_dbf, 6);

	if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
		TRACE("type:  fcp\n");
		TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
		TRACE("wwpn:  %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
		TRACE("lun:   %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
	} else if (ipl_info.type == IPL_TYPE_NVME_DUMP) {
		TRACE("type:  nvme\n");
		TRACE("fid:   %x\n", ipl_info.data.nvme.fid);
		TRACE("nsid:  %x\n", ipl_info.data.nvme.nsid);
	} else if (ipl_info.type == IPL_TYPE_ECKD_DUMP) {
		TRACE("type:  eckd\n");
		TRACE("devno: %x\n", ipl_info.data.eckd.dev_id.devno);
		TRACE("ssid:  %x\n", ipl_info.data.eckd.dev_id.ssid);
	}

	rc = sclp_sdias_init();
	if (rc)
		goto fail;

	rc = check_sdias();
	if (rc)
		goto fail;
	hsa_available = 1;

	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
	if (rc)
		goto fail;

	if (arch == ARCH_S390) {
		pr_alert("The 64-bit dump tool cannot be used for a 32-bit system\n");
		rc = -EINVAL;
		goto fail;
	}

	pr_alert("The dump process started for a 64-bit operating system\n");
	rc = init_cpu_info();
	if (rc)
		goto fail;

	rc = zcore_reipl_init();
	if (rc)
		goto fail;

	zcore_dir = debugfs_create_dir("zcore", NULL);
	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
					       NULL, &zcore_reipl_fops);
	zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
					     NULL, &zcore_hsa_fops);

	register_reboot_notifier(&zcore_reboot_notifier);
	atomic_notifier_chain_register(&panic_notifier_list, &zcore_on_panic_notifier);

	return 0;
fail:
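	/* Initialization failed, the HSA contents are no longer needed */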
	diag308(DIAG308_REL_HSA, NULL);
	return rc;
}
subsys_initcall(zcore_init);
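/*
 * Minimal usage sketch of the debugfs interface created above, assuming
 * debugfs is mounted at its usual location /sys/kernel/debug (mount point
 * and shell commands are illustrative, not part of this driver):
 *
 *   cat /sys/kernel/debug/zcore/hsa        # HSA size in hex, or 0 if released
 *   echo 0 > /sys/kernel/debug/zcore/hsa   # release the HSA early
 *   echo 1 > /sys/kernel/debug/zcore/reipl # re-IPL using the saved IPL block
 *
 * Any write to "reipl" triggers the re-IPL; the written value is ignored.
 */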