// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
 *
 * Author: Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/workqueue.h>

static struct class devcd_class;

/* global disable flag, for security purposes */
static bool devcd_disabled;

struct devcd_entry {
        struct device devcd_dev;
        void *data;
        size_t datalen;
        /*
         * The mutex serializes accesses to the del_wk work between kernel
         * and user space: adding the devcd with device_add() sends a uevent
         * to user space, and user space may react by calling
         * devcd_data_write(), which tries to modify a work item that
         * devcoredump has not even initialized/queued yet:
         *
         *        cpu0(X)                       cpu1(Y)
         *
         * dev_coredump()                uevent sent to user space
         * device_add()  ======================> user space process Y reads
         *                                       the uevents and writes to the
         *                                       devcd fd, which results in
         *
         *                                       devcd_data_write()
         *                                         mod_delayed_work()
         *                                           try_to_grab_pending()
         *                                             del_timer()
         *                                               debug_assert_init()
         * INIT_DELAYED_WORK()
         * schedule_delayed_work()
         *
         * The mutex alone would not be enough, though: devcd_data_write()
         * could still re-schedule the del_wk work after it has been flushed
         * by a call to devcd_free(), as shown below:
         *
         * disabled_store()
         *   devcd_free()
         *     mutex_lock()            devcd_data_write()
         *     flush_delayed_work()
         *     mutex_unlock()
         *                               mutex_lock()
         *                               mod_delayed_work()
         *                               mutex_unlock()
         *
         * So the delete_work flag is required as well.
         */
        struct mutex mutex;
        bool delete_work;
        struct module *owner;
        ssize_t (*read)(char *buffer, loff_t offset, size_t count,
                        void *data, size_t datalen);
        void (*free)(void *data);
        struct delayed_work del_wk;
        struct device *failing_dev;
};

static struct devcd_entry *dev_to_devcd(struct device *dev)
{
        return container_of(dev, struct devcd_entry, devcd_dev);
}

static void devcd_dev_release(struct device *dev)
{
        struct devcd_entry *devcd = dev_to_devcd(dev);

        devcd->free(devcd->data);
        module_put(devcd->owner);

        /*
         * this seems racy, but I don't see a notifier or such on
         * a struct device to know when it goes away?
         */
        if (devcd->failing_dev->kobj.sd)
                sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
                                  "devcoredump");

        put_device(devcd->failing_dev);
        kfree(devcd);
}

static void devcd_del(struct work_struct *wk)
{
        struct devcd_entry *devcd;

        devcd = container_of(wk, struct devcd_entry, del_wk.work);

        device_del(&devcd->devcd_dev);
        put_device(&devcd->devcd_dev);
}

static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
                               struct bin_attribute *bin_attr,
                               char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct devcd_entry *devcd = dev_to_devcd(dev);

        return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
}

static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *bin_attr,
                                char *buffer, loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct devcd_entry *devcd = dev_to_devcd(dev);

        mutex_lock(&devcd->mutex);
        if (!devcd->delete_work) {
                devcd->delete_work = true;
                mod_delayed_work(system_wq, &devcd->del_wk, 0);
        }
        mutex_unlock(&devcd->mutex);

        return count;
}
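/*
 * Illustrative user-space sketch (not part of this file): a dump is consumed
 * by reading the "data" attribute and freed by writing anything back to it,
 * which lands in devcd_data_write() above. The device name devcd1 is an
 * assumption; real consumers discover the node from the uevent.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void drain_devcd(void)
 *	{
 *		char buf[4096];
 *		int fd = open("/sys/class/devcoredump/devcd1/data", O_RDWR);
 *
 *		if (fd < 0)
 *			return;
 *		while (read(fd, buf, sizeof(buf)) > 0)
 *			;			// store the dump somewhere
 *		write(fd, "0", 1);		// any write frees the dump
 *		close(fd);
 *	}
 */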
static struct bin_attribute devcd_attr_data = {
        .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
        .size = 0,
        .read = devcd_data_read,
        .write = devcd_data_write,
};

static struct bin_attribute *devcd_dev_bin_attrs[] = {
        &devcd_attr_data, NULL,
};

static const struct attribute_group devcd_dev_group = {
        .bin_attrs = devcd_dev_bin_attrs,
};

static const struct attribute_group *devcd_dev_groups[] = {
        &devcd_dev_group, NULL,
};

static int devcd_free(struct device *dev, void *data)
{
        struct devcd_entry *devcd = dev_to_devcd(dev);

        mutex_lock(&devcd->mutex);
        if (!devcd->delete_work)
                devcd->delete_work = true;

        flush_delayed_work(&devcd->del_wk);
        mutex_unlock(&devcd->mutex);
        return 0;
}

static ssize_t disabled_show(const struct class *class, const struct class_attribute *attr,
                             char *buf)
{
        return sysfs_emit(buf, "%d\n", devcd_disabled);
}

/*
 * disabled_store()                             worker()
 *  class_for_each_device(&devcd_class,
 *      NULL, NULL, devcd_free)
 *    ...
 *    ...
 *    while ((dev = class_dev_iter_next(&iter))
 *                                              devcd_del()
 *                                                device_del()
 *                                                  put_device() <- last reference
 *      error = fn(dev, data)                   devcd_dev_release()
 *      devcd_free(dev, data)                     kfree(devcd)
 *      mutex_lock(&devcd->mutex);
 *
 * From the diagram above it looks as if disabled_store() races with a
 * concurrently running devcd_del() and takes devcd->mutex after the devcd
 * memory has been kfree()d on the last put_device(), causing a memory
 * abort. This cannot happen, however: fn(dev, data) runs while holding its
 * own reference to the device (via klist_node), so that reference is never
 * the last one, and the situation above does not occur.
 */

static ssize_t disabled_store(const struct class *class, const struct class_attribute *attr,
                              const char *buf, size_t count)
{
        long tmp = simple_strtol(buf, NULL, 10);

        /*
         * This essentially makes the attribute write-once, since you can't
         * go back to not having it disabled. This is intentional, it serves
         * as a system lockdown feature.
         */
        if (tmp != 1)
                return -EINVAL;

        devcd_disabled = true;

        class_for_each_device(&devcd_class, NULL, NULL, devcd_free);

        return count;
}
static CLASS_ATTR_RW(disabled);

static struct attribute *devcd_class_attrs[] = {
        &class_attr_disabled.attr,
        NULL,
};
ATTRIBUTE_GROUPS(devcd_class);

static struct class devcd_class = {
        .name           = "devcoredump",
        .dev_release    = devcd_dev_release,
        .dev_groups     = devcd_dev_groups,
        .class_groups   = devcd_class_groups,
};
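/*
 * Illustrative user-space sketch (not part of this file): the class-level
 * "disabled" attribute is the write-once lockdown knob handled by
 * disabled_store() above; only the string "1" is accepted.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void lock_down_devcd(void)
 *	{
 *		int fd = open("/sys/class/devcoredump/disabled", O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, "1", 1);	// write-once: cannot be undone
 *			close(fd);
 *		}
 *	}
 */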
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
                           void *data, size_t datalen)
{
        return memory_read_from_buffer(buffer, count, &offset, data, datalen);
}

static void devcd_freev(void *data)
{
        vfree(data);
}

/**
 * dev_coredumpv - create device coredump with vmalloc data
 * @dev: the struct device for the crashed device
 * @data: vmalloc data containing the device coredump
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * This function takes ownership of the vmalloc'ed data and will free
 * it when it is no longer used. See dev_coredumpm() for more information.
 */
void dev_coredumpv(struct device *dev, void *data, size_t datalen,
                   gfp_t gfp)
{
        dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
}
EXPORT_SYMBOL_GPL(dev_coredumpv);

static int devcd_match_failing(struct device *dev, const void *failing)
{
        struct devcd_entry *devcd = dev_to_devcd(dev);

        return devcd->failing_dev == failing;
}

/**
 * devcd_free_sgtable - free all the memory of the given scatterlist table
 * (i.e. both pages and scatterlist instances)
 * NOTE: if two tables are allocated with devcd_alloc_sgtable and then chained
 * using the sg_chain function, this function should be called only once
 * on the chained table
 * @data: pointer to sg_table to free
 */
static void devcd_free_sgtable(void *data)
{
        _devcd_free_sgtable(data);
}

/**
 * devcd_read_from_sgtable - copy data from sg_table to a given buffer
 * and return the number of bytes read
 * @buffer: the buffer to copy the data to
 * @buf_len: the length of the buffer
 * @data: the scatterlist table to copy from
 * @offset: start copying @offset bytes from the head of the data
 *	in the given scatterlist
 * @data_len: the length of the data in the sg_table
 *
 * Returns: the number of bytes copied
 */
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
                                       size_t buf_len, void *data,
                                       size_t data_len)
{
        struct scatterlist *table = data;

        if (offset > data_len)
                return -EINVAL;

        if (offset + buf_len > data_len)
                buf_len = data_len - offset;
        return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
                                  offset);
}

/**
 * dev_coredump_put - remove device coredump
 * @dev: the struct device for the crashed device
 *
 * dev_coredump_put() removes the coredump for the given device from the
 * file system and frees its associated data, if one exists; otherwise it
 * does nothing.
 *
 * It is useful for modules that do not want to keep a coredump
 * available after they are unloaded.
 */
void dev_coredump_put(struct device *dev)
{
        struct device *existing;

        existing = class_find_device(&devcd_class, NULL, dev,
                                     devcd_match_failing);
        if (existing) {
                devcd_free(existing, NULL);
                put_device(existing);
        }
}
EXPORT_SYMBOL_GPL(dev_coredump_put);
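/*
 * Illustrative driver-side sketch (not part of this file): the simplest
 * producer vmalloc()s a buffer, fills it, and hands ownership to
 * dev_coredumpv(); on removal it can drop a pending dump with
 * dev_coredump_put(). struct my_dev, my_fill_dump() and MY_DUMP_SIZE are
 * assumptions for illustration.
 *
 *	static void my_dev_crashed(struct my_dev *mdev)
 *	{
 *		void *buf = vmalloc(MY_DUMP_SIZE);
 *
 *		if (!buf)
 *			return;
 *		my_fill_dump(mdev, buf, MY_DUMP_SIZE);
 *		// devcoredump now owns buf and will vfree() it
 *		dev_coredumpv(mdev->dev, buf, MY_DUMP_SIZE, GFP_KERNEL);
 *	}
 *
 *	static void my_dev_remove(struct my_dev *mdev)
 *	{
 *		dev_coredump_put(mdev->dev);
 *	}
 */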
/**
 * dev_coredumpm_timeout - create device coredump with read/free methods with a
 * custom timeout.
 * @dev: the struct device for the crashed device
 * @owner: the module that contains the read/free functions, use %THIS_MODULE
 * @data: data cookie for the @read/@free functions
 * @datalen: length of the data
 * @gfp: allocation flags
 * @read: function to read from the given buffer
 * @free: function to free the given buffer
 * @timeout: time in jiffies to remove coredump
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed the @free
 * function will be called to free the data.
 */
void dev_coredumpm_timeout(struct device *dev, struct module *owner,
                           void *data, size_t datalen, gfp_t gfp,
                           ssize_t (*read)(char *buffer, loff_t offset,
                                           size_t count, void *data,
                                           size_t datalen),
                           void (*free)(void *data),
                           unsigned long timeout)
{
        static atomic_t devcd_count = ATOMIC_INIT(0);
        struct devcd_entry *devcd;
        struct device *existing;

        if (devcd_disabled)
                goto free;

        existing = class_find_device(&devcd_class, NULL, dev,
                                     devcd_match_failing);
        if (existing) {
                put_device(existing);
                goto free;
        }

        if (!try_module_get(owner))
                goto free;

        devcd = kzalloc(sizeof(*devcd), gfp);
        if (!devcd)
                goto put_module;

        devcd->owner = owner;
        devcd->data = data;
        devcd->datalen = datalen;
        devcd->read = read;
        devcd->free = free;
        devcd->failing_dev = get_device(dev);
        devcd->delete_work = false;

        mutex_init(&devcd->mutex);
        device_initialize(&devcd->devcd_dev);

        dev_set_name(&devcd->devcd_dev, "devcd%d",
                     atomic_inc_return(&devcd_count));
        devcd->devcd_dev.class = &devcd_class;

        mutex_lock(&devcd->mutex);
        dev_set_uevent_suppress(&devcd->devcd_dev, true);
        if (device_add(&devcd->devcd_dev))
                goto put_device;

        /*
         * These should normally not fail, but there is no problem
         * continuing without the links, so just warn instead of
         * failing.
         */
        if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
                              "failing_device") ||
            sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
                              "devcoredump"))
                dev_warn(dev, "devcoredump create_link failed\n");

        dev_set_uevent_suppress(&devcd->devcd_dev, false);
        kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
        INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
        schedule_delayed_work(&devcd->del_wk, timeout);
        mutex_unlock(&devcd->mutex);
        return;
 put_device:
        put_device(&devcd->devcd_dev);
        mutex_unlock(&devcd->mutex);
 put_module:
        module_put(owner);
 free:
        free(data);
}
EXPORT_SYMBOL_GPL(dev_coredumpm_timeout);
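/*
 * Illustrative sketch (not part of this file): a producer with its own data
 * representation supplies read/free callbacks to dev_coredumpm_timeout().
 * Here the data cookie is a plain kmalloc()ed buffer read via
 * memory_read_from_buffer(); my_-prefixed names and MY_DUMP_TIMEOUT are
 * assumptions for illustration.
 *
 *	static ssize_t my_dump_read(char *buffer, loff_t offset, size_t count,
 *				    void *data, size_t datalen)
 *	{
 *		return memory_read_from_buffer(buffer, count, &offset,
 *					       data, datalen);
 *	}
 *
 *	static void my_dump_free(void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	// in the crash path, buf being a kmalloc()ed dump of len bytes:
 *	dev_coredumpm_timeout(mdev->dev, THIS_MODULE, buf, len, GFP_KERNEL,
 *			      my_dump_read, my_dump_free, MY_DUMP_TIMEOUT);
 */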
/**
 * dev_coredumpsg - create device coredump that uses scatterlist as data
 * parameter
 * @dev: the struct device for the crashed device
 * @table: the dump data
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed
 * it will free the data.
 */
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
                    size_t datalen, gfp_t gfp)
{
        dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
                      devcd_free_sgtable);
}
EXPORT_SYMBOL_GPL(dev_coredumpsg);

static int __init devcoredump_init(void)
{
        return class_register(&devcd_class);
}
__initcall(devcoredump_init);

static void __exit devcoredump_exit(void)
{
        class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
        class_unregister(&devcd_class);
}
__exitcall(devcoredump_exit);
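/*
 * Closing illustrative sketch (not part of this file) for dev_coredumpsg()
 * above: the framework frees both the pages and the table via
 * devcd_free_sgtable(), so they must be allocated the way that helper
 * expects, i.e. page-backed entries in a kmalloc-family table. A rough
 * sketch for a single, unchained table; my_-prefixed names are assumptions.
 *
 *	static void my_dump_sg(struct my_dev *mdev, size_t datalen)
 *	{
 *		unsigned int i, nents = DIV_ROUND_UP(datalen, PAGE_SIZE);
 *		struct scatterlist *table;
 *
 *		table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
 *		if (!table)
 *			return;
 *		sg_init_table(table, nents);
 *		for (i = 0; i < nents; i++) {
 *			struct page *page = alloc_page(GFP_KERNEL);
 *
 *			if (!page)
 *				goto free;	// undo partial allocation
 *			// ... copy dump data into the page here ...
 *			sg_set_page(&table[i], page, PAGE_SIZE, 0);
 *		}
 *		// devcoredump now owns the pages and the table
 *		dev_coredumpsg(mdev->dev, table, datalen, GFP_KERNEL);
 *		return;
 *	free:
 *		while (i--)
 *			__free_page(sg_page(&table[i]));
 *		kfree(table);
 *	}
 */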