// SPDX-License-Identifier: GPL-2.0+
/*
 * Pvpanic Device Support
 *
 * Copyright (C) 2013 Fujitsu.
 * Copyright (C) 2018 ZTE.
 * Copyright (C) 2021 Oracle.
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp_types.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/kstrtox.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <uapi/misc/pvpanic.h>

#include "pvpanic.h"

MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");

struct pvpanic_instance {
	void __iomem *base;
	unsigned int capability;
	unsigned int events;
	struct sys_off_handler *sys_off;
	struct list_head list;
};

static struct list_head pvpanic_list;
static spinlock_t pvpanic_lock;

static void
pvpanic_send_event(unsigned int event)
{
	struct pvpanic_instance *pi_cur;

	/*
	 * The panic path must never block: if the list lock is contended,
	 * skip the notification instead of spinning on it.
	 */
	if (!spin_trylock(&pvpanic_lock))
		return;

	list_for_each_entry(pi_cur, &pvpanic_list, list) {
		if (event & pi_cur->capability & pi_cur->events)
			iowrite8(event, pi_cur->base);
	}
	spin_unlock(&pvpanic_lock);
}

static int
pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
	unsigned int event = PVPANIC_PANICKED;

	if (kexec_crash_loaded())
		event = PVPANIC_CRASH_LOADED;

	pvpanic_send_event(event);

	return NOTIFY_DONE;
}

/*
 * Call our notifier very early on panic, deferring the
 * action taken to the hypervisor.
 */
static struct notifier_block pvpanic_panic_nb = {
	.notifier_call = pvpanic_panic_notify,
	.priority = INT_MAX,
};

static int pvpanic_sys_off(struct sys_off_data *data)
{
	pvpanic_send_event(PVPANIC_SHUTDOWN);

	return NOTIFY_DONE;
}

static void pvpanic_synchronize_sys_off_handler(struct device *dev, struct pvpanic_instance *pi)
{
	/*
	 * The kernel core has logic to fall back to system halt if no
	 * sys_off_handler is registered.
	 * When the pvpanic sys_off_handler is disabled via sysfs, the kernel
	 * should use that fallback logic, so the handler needs to be
	 * unregistered.
	 */

	struct sys_off_handler *sys_off;

	/* Nothing to do if the registration state already matches the event mask. */
	if (!(pi->events & PVPANIC_SHUTDOWN) == !pi->sys_off)
		return;

	if (!pi->sys_off) {
		sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_LOW,
						   pvpanic_sys_off, NULL);
		if (IS_ERR(sys_off))
			dev_warn(dev, "Could not register sys_off_handler: %pe\n", sys_off);
		else
			pi->sys_off = sys_off;
	} else {
		unregister_sys_off_handler(pi->sys_off);
		pi->sys_off = NULL;
	}
}

static void pvpanic_remove(void *param)
{
	struct pvpanic_instance *pi_cur, *pi_next;
	struct pvpanic_instance *pi = param;

	spin_lock(&pvpanic_lock);
	list_for_each_entry_safe(pi_cur, pi_next, &pvpanic_list, list) {
		if (pi_cur == pi) {
			list_del(&pi_cur->list);
			break;
		}
	}
	spin_unlock(&pvpanic_lock);

	unregister_sys_off_handler(pi->sys_off);
}

static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pvpanic_instance *pi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%x\n", pi->capability);
}
static DEVICE_ATTR_RO(capability);

static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pvpanic_instance *pi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%x\n", pi->events);
}

static ssize_t events_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pvpanic_instance *pi = dev_get_drvdata(dev);
	unsigned int tmp;
	int err;

	err = kstrtouint(buf, 16, &tmp);
	if (err)
		return err;

	/* Only events the device actually advertises may be enabled. */
	if ((tmp & pi->capability) != tmp)
		return -EINVAL;

	pi->events = tmp;
	pvpanic_synchronize_sys_off_handler(dev, pi);

	return count;
}
static DEVICE_ATTR_RW(events);

static struct attribute *pvpanic_dev_attrs[] = {
	&dev_attr_capability.attr,
	&dev_attr_events.attr,
	NULL
};

static const struct attribute_group pvpanic_dev_group = {
	.attrs = pvpanic_dev_attrs,
};

const struct attribute_group *pvpanic_dev_groups[] = {
	&pvpanic_dev_group,
	NULL
};
EXPORT_SYMBOL_GPL(pvpanic_dev_groups);

int devm_pvpanic_probe(struct device *dev, void __iomem *base)
{
	struct pvpanic_instance *pi;

	if (!base)
		return -EINVAL;

	pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	pi->base = base;
	pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED | PVPANIC_SHUTDOWN;

	/* initialize capability by reading the device register */
	pi->capability &= ioread8(base);
	pi->events = pi->capability;

	pi->sys_off = NULL;
	pvpanic_synchronize_sys_off_handler(dev, pi);

	spin_lock(&pvpanic_lock);
	list_add(&pi->list, &pvpanic_list);
	spin_unlock(&pvpanic_lock);

	dev_set_drvdata(dev, pi);

	return devm_add_action_or_reset(dev, pvpanic_remove, pi);
}
EXPORT_SYMBOL_GPL(devm_pvpanic_probe);

static int pvpanic_init(void)
{
	INIT_LIST_HEAD(&pvpanic_list);
	spin_lock_init(&pvpanic_lock);

	atomic_notifier_chain_register(&panic_notifier_list, &pvpanic_panic_nb);

	return 0;
}
module_init(pvpanic_init);

static void pvpanic_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &pvpanic_panic_nb);
}
module_exit(pvpanic_exit);