// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Google, Inc.
 *
 * pstore ftrace recorder: hook every traced function call and persist a
 * compact pstore_ftrace_record (ip, parent ip, cpu, sequence stamp) through
 * the registered pstore backend, so the function trace survives a crash.
 * A debugfs knob ("pstore/record_ftrace") and a boot-time module parameter
 * turn recording on and off.
 */

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/barrier.h>
#include <asm/setup.h>
#include "internal.h"

/* This doesn't need to be atomic: speed is chosen over correctness here. */
static u64 pstore_ftrace_stamp;

/*
 * Make a recorded address meaningful across reboots when KASLR is enabled:
 * core kernel text is stored relative to the unrandomized base
 * (ip - kaslr_offset()), while any other address (e.g. module text) is
 * tagged by clearing the top bit so decode_ip() knows not to re-apply the
 * offset.  No-op unless KASLR is on, the CPU is not packed into the ip
 * field, and pstore is built in (kaslr_offset() is not available to a
 * modular pstore).
 */
static inline unsigned long adjust_ip(unsigned long ip)
{
#if defined(CONFIG_RANDOMIZE_BASE) && !defined(PSTORE_CPU_IN_IP) && IS_BUILTIN(CONFIG_PSTORE)
	if (core_kernel_text(ip))
		return ip - kaslr_offset();

	/* Not core kernel text: clear the tag bit to mark it as such. */
	__clear_bit(BITS_PER_LONG - 1, &ip);
#endif
	return ip;
}

/*
 * Inverse of adjust_ip(): a value with the top bit still set was core
 * kernel text, so re-apply the current kaslr_offset(); otherwise restore
 * the tag bit that adjust_ip() cleared and return the address unchanged.
 *
 * NOTE(review): deliberately not static — presumably declared in
 * "internal.h" for use by the read-out path; confirm, otherwise this
 * should be static inline.
 */
inline unsigned long decode_ip(unsigned long ip)
{
#if defined(CONFIG_RANDOMIZE_BASE) && !defined(PSTORE_CPU_IN_IP) && IS_BUILTIN(CONFIG_PSTORE)
	if (test_bit(BITS_PER_LONG - 1, &ip))
		return ip + kaslr_offset();

	__set_bit(BITS_PER_LONG - 1, &ip);

#endif
	return ip;
}

/*
 * The ftrace callback itself: invoked for every traced function while
 * recording is enabled.  Must be notrace (it must not recurse into
 * itself) and keeps the window with IRQs disabled as small as possible —
 * just the stamp/cpu fill-in and the single backend write — so each
 * record is written atomically with respect to this CPU.
 */
static void notrace pstore_ftrace_call(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *op,
				       struct ftrace_regs *fregs)
{
	int bit;
	unsigned long flags;
	struct pstore_ftrace_record rec = {};
	struct pstore_record record = {
		.type = PSTORE_TYPE_FTRACE,
		.buf = (char *)&rec,
		.size = sizeof(rec),
		.psi = psinfo,
	};

	/* Don't compete with the dump path while a crash is being logged. */
	if (unlikely(oops_in_progress))
		return;

	/* Bail out rather than recurse if we are already in this callback. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	local_irq_save(flags);

	rec.ip = adjust_ip(ip);
	rec.parent_ip = adjust_ip(parent_ip);
	pstore_ftrace_write_timestamp(&rec, pstore_ftrace_stamp++);
	pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
	psinfo->write(&record);

	local_irq_restore(flags);
	ftrace_test_recursion_unlock(bit);
}

static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
	.func	= pstore_ftrace_call,
};

/* Serializes enable/disable state changes (record_ftrace + ftrace ops). */
static DEFINE_MUTEX(pstore_ftrace_lock);
/* Current/initial recording state; settable read-only at boot (0400). */
static bool record_ftrace;
module_param(record_ftrace, bool, 0400);
MODULE_PARM_DESC(record_ftrace,
		 "enable ftrace recording immediately (default: off)");

/*
 * Switch recording on or off, (un)registering the ftrace callback as
 * needed.  record_ftrace is only updated when the ftrace call succeeds.
 * Returns 0 on success or the negative error from ftrace registration.
 * Callers serialize via pstore_ftrace_lock, except pstore_register_ftrace()
 * which runs before the debugfs knob exists and so has no competitor.
 */
static int pstore_set_ftrace_enabled(bool on)
{
	ssize_t ret;

	if (on == record_ftrace)
		return 0;

	if (on) {
		ftrace_ops_set_global_filter(&pstore_ftrace_ops);
		ret = register_ftrace_function(&pstore_ftrace_ops);
	} else {
		ret = unregister_ftrace_function(&pstore_ftrace_ops);
	}

	if (ret) {
		pr_err("%s: unable to %sregister ftrace ops: %zd\n",
		       __func__, on ? "" : "un", ret);
	} else {
		record_ftrace = on;
	}

	return ret;
}

/*
 * debugfs write handler for "record_ftrace": accepts "0"/"1" (base-2
 * parse of a u8) and toggles recording under pstore_ftrace_lock.
 * Returns the consumed byte count on success, negative errno otherwise.
 */
static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	u8 on;
	ssize_t ret;

	ret = kstrtou8_from_user(buf, count, 2, &on);
	if (ret)
		return ret;

	mutex_lock(&pstore_ftrace_lock);
	ret = pstore_set_ftrace_enabled(on);
	mutex_unlock(&pstore_ftrace_lock);

	if (ret == 0)
		ret = count;

	return ret;
}

/*
 * debugfs read handler for "record_ftrace": reports the current state as
 * "0\n" or "1\n" (unlocked read; a racing toggle is harmless here).
 */
static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf,
				       size_t count, loff_t *ppos)
{
	char val[] = { '0' + record_ftrace, '\n' };

	return simple_read_from_buffer(buf, count, ppos, val, sizeof(val));
}

static const struct file_operations pstore_knob_fops = {
	.open	= simple_open,
	.read	= pstore_ftrace_knob_read,
	.write	= pstore_ftrace_knob_write,
};

static struct dentry *pstore_ftrace_dir;

/*
 * Called when a pstore backend registers: set up the debugfs directory
 * and knob, and honor the boot-time record_ftrace parameter by enabling
 * recording immediately if requested.  Backends without a write method
 * cannot record, so bail out early for those.
 */
void pstore_register_ftrace(void)
{
	if (!psinfo->write)
		return;

	pstore_ftrace_dir = debugfs_create_dir("pstore", NULL);

	/* Safe without the lock: the debugfs knob doesn't exist yet. */
	pstore_set_ftrace_enabled(record_ftrace);

	debugfs_create_file("record_ftrace", 0600, pstore_ftrace_dir, NULL,
			    &pstore_knob_fops);
}

/*
 * Backend teardown: stop recording (if active) and remove the debugfs
 * entries.  Takes pstore_ftrace_lock to serialize against a concurrent
 * knob write.
 */
void pstore_unregister_ftrace(void)
{
	mutex_lock(&pstore_ftrace_lock);
	if (record_ftrace) {
		unregister_ftrace_function(&pstore_ftrace_ops);
		record_ftrace = false;
	}
	mutex_unlock(&pstore_ftrace_lock);

	debugfs_remove_recursive(pstore_ftrace_dir);
}

/*
 * Merge two ftrace logs into one, ordered by ascending record timestamp
 * (on a tie the src record is emitted first).  Leading bytes of either
 * log that do not form a whole pstore_ftrace_record are skipped — the
 * `% record_size` offsets discard partial-record residue, presumably
 * from ring-buffer wrap-around (NOTE(review): confirm against callers).
 *
 * On success the old *dest_log is freed and replaced by the merged
 * buffer (caller retains ownership of the new one), *dest_log_size is
 * updated, and 0 is returned.  On allocation failure -ENOMEM is
 * returned and *dest_log is left untouched.
 */
ssize_t pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size,
				  const char *src_log, size_t src_log_size)
{
	size_t dest_size, src_size, total, dest_off, src_off;
	size_t dest_idx = 0, src_idx = 0, merged_idx = 0;
	void *merged_buf;
	struct pstore_ftrace_record *drec, *srec, *mrec;
	size_t record_size = sizeof(struct pstore_ftrace_record);

	dest_off = *dest_log_size % record_size;
	dest_size = *dest_log_size - dest_off;

	src_off = src_log_size % record_size;
	src_size = src_log_size - src_off;

	total = dest_size + src_size;
	merged_buf = kmalloc(total, GFP_KERNEL);
	if (!merged_buf)
		return -ENOMEM;

	drec = (struct pstore_ftrace_record *)(*dest_log + dest_off);
	srec = (struct pstore_ftrace_record *)(src_log + src_off);
	mrec = (struct pstore_ftrace_record *)(merged_buf);

	/* Standard two-way merge of the timestamp-ordered record arrays. */
	while (dest_size > 0 && src_size > 0) {
		if (pstore_ftrace_read_timestamp(&drec[dest_idx]) <
		    pstore_ftrace_read_timestamp(&srec[src_idx])) {
			mrec[merged_idx++] = drec[dest_idx++];
			dest_size -= record_size;
		} else {
			mrec[merged_idx++] = srec[src_idx++];
			src_size -= record_size;
		}
	}

	/* Drain whichever input still has records left. */
	while (dest_size > 0) {
		mrec[merged_idx++] = drec[dest_idx++];
		dest_size -= record_size;
	}

	while (src_size > 0) {
		mrec[merged_idx++] = srec[src_idx++];
		src_size -= record_size;
	}

	kfree(*dest_log);
	*dest_log = merged_buf;
	*dest_log_size = total;

	return 0;
}
EXPORT_SYMBOL_GPL(pstore_ftrace_combine_log);