xref: /linux/fs/sysfs/file.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
1 /*
2  * fs/sysfs/file.c - sysfs regular (text) file implementation
3  *
4  * Copyright (c) 2001-3 Patrick Mochel
5  * Copyright (c) 2007 SUSE Linux Products GmbH
6  * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
7  *
8  * This file is released under the GPLv2.
9  *
10  * Please see Documentation/filesystems/sysfs.txt for more information.
11  */
12 
13 #include <linux/module.h>
14 #include <linux/kobject.h>
15 #include <linux/kallsyms.h>
16 #include <linux/slab.h>
17 #include <linux/fsnotify.h>
18 #include <linux/namei.h>
19 #include <linux/poll.h>
20 #include <linux/list.h>
21 #include <linux/mutex.h>
22 #include <linux/limits.h>
23 #include <asm/uaccess.h>
24 
25 #include "sysfs.h"
26 
27 /* used in crash dumps to help with debugging */
28 static char last_sysfs_file[PATH_MAX];
29 void sysfs_printk_last_file(void)
30 {
31 	printk(KERN_EMERG "last sysfs file: %s\n", last_sysfs_file);
32 }
33 
34 /*
35  * There's one sysfs_buffer for each open file and one
36  * sysfs_open_dirent for each sysfs_dirent with one or more open
37  * files.
38  *
39  * filp->private_data points to sysfs_buffer and
40  * sysfs_dirent->s_attr.open points to sysfs_open_dirent.  s_attr.open
41  * is protected by sysfs_open_dirent_lock.
42  */
43 static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
44 
45 struct sysfs_open_dirent {
46 	atomic_t		refcnt;
47 	atomic_t		event;
48 	wait_queue_head_t	poll;
49 	struct list_head	buffers; /* goes through sysfs_buffer.list */
50 };
51 
52 struct sysfs_buffer {
53 	size_t			count;
54 	loff_t			pos;
55 	char			* page;
56 	struct sysfs_ops	* ops;
57 	struct mutex		mutex;
58 	int			needs_read_fill;
59 	int			event;
60 	struct list_head	list;
61 };
62 
63 /**
64  *	fill_read_buffer - allocate and fill buffer from object.
65  *	@dentry:	dentry of the attribute file.
66  *	@buffer:	data buffer for file.
67  *
68  *	Allocate @buffer->page, if it hasn't been already, then call the
69  *	kobject's show() method to fill the buffer with this attribute's
70  *	data.
71  *	This is called on the file's first read, and again whenever a read
72  *	restarts at offset zero or a write has invalidated the buffer.
73  */
74 static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer)
75 {
76 	struct sysfs_dirent *attr_sd = dentry->d_fsdata;
77 	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
78 	struct sysfs_ops * ops = buffer->ops;
79 	int ret = 0;
80 	ssize_t count;
81 
82 	if (!buffer->page)
83 		buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
84 	if (!buffer->page)
85 		return -ENOMEM;
86 
87 	/* need attr_sd for attr and ops, its parent for kobj */
88 	if (!sysfs_get_active_two(attr_sd))
89 		return -ENODEV;
90 
91 	buffer->event = atomic_read(&attr_sd->s_attr.open->event);
92 	count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
93 
94 	sysfs_put_active_two(attr_sd);
95 
96 	/*
97 	 * The code works fine with a PAGE_SIZE return, but in normal use
98 	 * such a value likely indicates a truncated result or an overflow.
99 	 */
100 	if (count >= (ssize_t)PAGE_SIZE) {
101 		print_symbol("fill_read_buffer: %s returned bad count\n",
102 			(unsigned long)ops->show);
103 		/* Try to struggle along */
104 		count = PAGE_SIZE - 1;
105 	}
106 	if (count >= 0) {
107 		buffer->needs_read_fill = 0;
108 		buffer->count = count;
109 	} else {
110 		ret = count;
111 	}
112 	return ret;
113 }
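
/*
 * Illustrative sketch (not part of this file): a typical attribute
 * show() method, here in the common kobj_attribute style that is
 * dispatched through the kobject's ktype->sysfs_ops.  The buffer it
 * receives is the single zeroed page allocated by fill_read_buffer()
 * above, so a well-behaved show() stays below PAGE_SIZE, e.g. by
 * using scnprintf().  The foo_* names are hypothetical.
 */
#if 0	/* example only */
static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct foo_obj *foo = container_of(kobj, struct foo_obj, kobj);

	/* scnprintf() guarantees the result fits in one page */
	return scnprintf(buf, PAGE_SIZE, "%d\n", foo->value);
}
#endif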
114 
115 /**
116  *	sysfs_read_file - read an attribute.
117  *	@file:	file pointer.
118  *	@buf:	buffer to fill.
119  *	@count:	number of bytes to read.
120  *	@ppos:	starting offset in file.
121  *
122  *	Userspace wants to read an attribute file. The attribute's
123  *	sysfs_dirent is in the dentry's ->d_fsdata; the target kobject is
124  *	reached through that dirent's parent.
125  *
126  *	We call fill_read_buffer() to allocate and fill the buffer from the
127  *	object's show() method exactly once (if the read is happening from
128  *	the beginning of the file). That should fill the entire buffer with
129  *	all the data the object has to offer for that attribute.
130  *	We then call flush_read_buffer() to copy the buffer to userspace
131  *	in the increments specified.
132  */
133 
134 static ssize_t
135 sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
136 {
137 	struct sysfs_buffer * buffer = file->private_data;
138 	ssize_t retval = 0;
139 
140 	mutex_lock(&buffer->mutex);
141 	if (buffer->needs_read_fill || *ppos == 0) {
142 		retval = fill_read_buffer(file->f_path.dentry,buffer);
143 		if (retval)
144 			goto out;
145 	}
146 	pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
147 		 __func__, count, *ppos, buffer->page);
148 	retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
149 					 buffer->count);
150 out:
151 	mutex_unlock(&buffer->mutex);
152 	return retval;
153 }
154 
155 /**
156  *	fill_write_buffer - copy buffer from userspace.
157  *	@buffer:	data buffer for file.
158  *	@buf:		data from user.
159  *	@count:		number of bytes in @buf.
160  *
161  *	Allocate @buffer->page if it hasn't been already, then
162  *	copy the user-supplied buffer into it.
163  */
164 
165 static int
166 fill_write_buffer(struct sysfs_buffer * buffer, const char __user * buf, size_t count)
167 {
168 	int error;
169 
170 	if (!buffer->page)
171 		buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
172 	if (!buffer->page)
173 		return -ENOMEM;
174 
175 	if (count >= PAGE_SIZE)
176 		count = PAGE_SIZE - 1;
177 	error = copy_from_user(buffer->page,buf,count);
178 	buffer->needs_read_fill = 1;
179 	/* if buf contains a string, terminate it with '\0' so that
180 	   e.g. sscanf() can parse it easily */
181 	buffer->page[count] = 0;
182 	return error ? -EFAULT : count;
183 }
184 
185 
186 /**
187  *	flush_write_buffer - push buffer to kobject.
188  *	@dentry:	dentry of the attribute file
189  *	@buffer:	data buffer for file.
190  *	@count:		number of bytes
191  *
192  *	Get the correct pointers for the kobject and the attribute we're
193  *	dealing with, then call the store() method for the attribute,
194  *	passing the buffer that we acquired in fill_write_buffer().
195  */
196 
197 static int
198 flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t count)
199 {
200 	struct sysfs_dirent *attr_sd = dentry->d_fsdata;
201 	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
202 	struct sysfs_ops * ops = buffer->ops;
203 	int rc;
204 
205 	/* need attr_sd for attr and ops, its parent for kobj */
206 	if (!sysfs_get_active_two(attr_sd))
207 		return -ENODEV;
208 
209 	rc = ops->store(kobj, attr_sd->s_attr.attr, buffer->page, count);
210 
211 	sysfs_put_active_two(attr_sd);
212 
213 	return rc;
214 }
215 
216 
217 /**
218  *	sysfs_write_file - write an attribute.
219  *	@file:	file pointer
220  *	@buf:	data to write
221  *	@count:	number of bytes
222  *	@ppos:	starting offset
223  *
224  *	Similar to sysfs_read_file(), though working in the opposite direction.
225  *	We allocate and fill the data from the user in fill_write_buffer(),
226  *	then push it to the kobject in flush_write_buffer().
227  *	There is no easy way for us to know if userspace is only doing a partial
228  *	write, so we don't support them. We expect the entire buffer to come
229  *	on the first write.
230  *	Hint: if you're writing a value, first read the file, modify only
231  *	the value you're changing, then write the entire buffer back.
232  */
233 
234 static ssize_t
235 sysfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
236 {
237 	struct sysfs_buffer * buffer = file->private_data;
238 	ssize_t len;
239 
240 	mutex_lock(&buffer->mutex);
241 	len = fill_write_buffer(buffer, buf, count);
242 	if (len > 0)
243 		len = flush_write_buffer(file->f_path.dentry, buffer, len);
244 	if (len > 0)
245 		*ppos += len;
246 	mutex_unlock(&buffer->mutex);
247 	return len;
248 }
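
/*
 * Illustrative sketch (not part of this file): a matching store()
 * method.  fill_write_buffer() NUL-terminates the page, so the value
 * can be parsed directly, and the whole value is expected to arrive
 * in a single write as described above.  The foo_* names are
 * hypothetical.
 */
#if 0	/* example only */
static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	struct foo_obj *foo = container_of(kobj, struct foo_obj, kobj);
	unsigned long val;

	if (strict_strtoul(buf, 10, &val))
		return -EINVAL;

	foo->value = val;
	return count;	/* consume the entire buffer */
}
#endif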
249 
250 /**
251  *	sysfs_get_open_dirent - get or create sysfs_open_dirent
252  *	@sd: target sysfs_dirent
253  *	@buffer: sysfs_buffer for this instance of open
254  *
255  *	If @sd->s_attr.open exists, increment its reference count;
256  *	otherwise, create one.  @buffer is chained to the buffers
257  *	list.
258  *
259  *	LOCKING:
260  *	Kernel thread context (may sleep).
261  *
262  *	RETURNS:
263  *	0 on success, -errno on failure.
264  */
265 static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
266 				 struct sysfs_buffer *buffer)
267 {
268 	struct sysfs_open_dirent *od, *new_od = NULL;
269 
270  retry:
271 	spin_lock(&sysfs_open_dirent_lock);
272 
273 	if (!sd->s_attr.open && new_od) {
274 		sd->s_attr.open = new_od;
275 		new_od = NULL;
276 	}
277 
278 	od = sd->s_attr.open;
279 	if (od) {
280 		atomic_inc(&od->refcnt);
281 		list_add_tail(&buffer->list, &od->buffers);
282 	}
283 
284 	spin_unlock(&sysfs_open_dirent_lock);
285 
286 	if (od) {
287 		kfree(new_od);
288 		return 0;
289 	}
290 
291 	/* not there, initialize a new one and retry */
292 	new_od = kmalloc(sizeof(*new_od), GFP_KERNEL);
293 	if (!new_od)
294 		return -ENOMEM;
295 
296 	atomic_set(&new_od->refcnt, 0);
297 	atomic_set(&new_od->event, 1);
298 	init_waitqueue_head(&new_od->poll);
299 	INIT_LIST_HEAD(&new_od->buffers);
300 	goto retry;
301 }
302 
303 /**
304  *	sysfs_put_open_dirent - put sysfs_open_dirent
305  *	@sd: target sysfs_dirent
306  *	@buffer: associated sysfs_buffer
307  *
308  *	Put @sd->s_attr.open and unlink @buffer from the buffers list.
309  *	If reference count reaches zero, disassociate and free it.
310  *
311  *	LOCKING:
312  *	None.
313  */
314 static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
315 				  struct sysfs_buffer *buffer)
316 {
317 	struct sysfs_open_dirent *od = sd->s_attr.open;
318 
319 	spin_lock(&sysfs_open_dirent_lock);
320 
321 	list_del(&buffer->list);
322 	if (atomic_dec_and_test(&od->refcnt))
323 		sd->s_attr.open = NULL;
324 	else
325 		od = NULL;
326 
327 	spin_unlock(&sysfs_open_dirent_lock);
328 
329 	kfree(od);
330 }
331 
332 static int sysfs_open_file(struct inode *inode, struct file *file)
333 {
334 	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
335 	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
336 	struct sysfs_buffer *buffer;
337 	struct sysfs_ops *ops;
338 	int error = -EACCES;
339 	char *p;
340 
341 	p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
342 	if (p)
343 		memmove(last_sysfs_file, p, strlen(p) + 1);
344 
345 	/* need attr_sd for attr and ops, its parent for kobj */
346 	if (!sysfs_get_active_two(attr_sd))
347 		return -ENODEV;
348 
349 	/* every kobject with an attribute needs a ktype assigned */
350 	if (kobj->ktype && kobj->ktype->sysfs_ops)
351 		ops = kobj->ktype->sysfs_ops;
352 	else {
353 		WARN(1, KERN_ERR "missing sysfs attribute operations for "
354 		       "kobject: %s\n", kobject_name(kobj));
355 		goto err_out;
356 	}
357 
358 	/* File needs write support.
359 	 * The inode's perms must say it's ok,
360 	 * and we must have a store method.
361 	 */
362 	if (file->f_mode & FMODE_WRITE) {
363 		if (!(inode->i_mode & S_IWUGO) || !ops->store)
364 			goto err_out;
365 	}
366 
367 	/* File needs read support.
368 	 * The inode's perms must say it's ok, and there
369 	 * must be a show method for it.
370 	 */
371 	if (file->f_mode & FMODE_READ) {
372 		if (!(inode->i_mode & S_IRUGO) || !ops->show)
373 			goto err_out;
374 	}
375 
376 	/* No error? Great, allocate a buffer for the file, and store it
377 	 * in file->private_data for easy access.
378 	 */
379 	error = -ENOMEM;
380 	buffer = kzalloc(sizeof(struct sysfs_buffer), GFP_KERNEL);
381 	if (!buffer)
382 		goto err_out;
383 
384 	mutex_init(&buffer->mutex);
385 	buffer->needs_read_fill = 1;
386 	buffer->ops = ops;
387 	file->private_data = buffer;
388 
389 	/* make sure we have open dirent struct */
390 	error = sysfs_get_open_dirent(attr_sd, buffer);
391 	if (error)
392 		goto err_free;
393 
394 	/* open succeeded, put active references */
395 	sysfs_put_active_two(attr_sd);
396 	return 0;
397 
398  err_free:
399 	kfree(buffer);
400  err_out:
401 	sysfs_put_active_two(attr_sd);
402 	return error;
403 }
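
/*
 * Illustrative sketch (not part of this file): the ktype wiring that
 * sysfs_open_file() checks for above.  A kobject whose attributes get
 * opened here needs a ktype with sysfs_ops; those ops typically
 * recover a type-specific attribute with container_of() and forward
 * to it.  The foo_* names are hypothetical.
 */
#if 0	/* example only */
struct foo_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kobject *, struct foo_attribute *, char *);
};

static ssize_t foo_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct foo_attribute *fattr = container_of(attr,
						   struct foo_attribute, attr);

	return fattr->show ? fattr->show(kobj, fattr, buf) : -EIO;
}

static struct sysfs_ops foo_sysfs_ops = {
	.show	= foo_attr_show,
	/* a .store hook would forward in the same way */
};

static struct kobj_type foo_ktype = {
	.sysfs_ops	= &foo_sysfs_ops,
	.release	= foo_release,	/* hypothetical, frees the object */
};
#endif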
404 
405 static int sysfs_release(struct inode *inode, struct file *filp)
406 {
407 	struct sysfs_dirent *sd = filp->f_path.dentry->d_fsdata;
408 	struct sysfs_buffer *buffer = filp->private_data;
409 
410 	sysfs_put_open_dirent(sd, buffer);
411 
412 	if (buffer->page)
413 		free_page((unsigned long)buffer->page);
414 	kfree(buffer);
415 
416 	return 0;
417 }
418 
419 /* Sysfs attribute files are pollable.  The idea is that you read
420  * the content and then you use 'poll' or 'select' to wait for
421  * the content to change.  When the content changes (assuming the
422  * manager for the kobject supports notification), poll will
423  * return POLLERR|POLLPRI, and select will return the fd whether
424  * it is waiting for read, write, or exceptions.
425  * Once poll/select indicates that the value has changed, you
426  * need to close and re-open the file, or seek to 0 and read again.
427  * Reminder: this only works for attributes which actively support
428  * it, and it is not possible to test an attribute from userspace
429  * to see if it supports poll (neither 'poll' nor 'select' returns
430  * an appropriate error code).  When in doubt, set a suitable timeout value.
431  */
432 static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
433 {
434 	struct sysfs_buffer * buffer = filp->private_data;
435 	struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
436 	struct sysfs_open_dirent *od = attr_sd->s_attr.open;
437 
438 	/* need parent for the kobj, grab both */
439 	if (!sysfs_get_active_two(attr_sd))
440 		goto trigger;
441 
442 	poll_wait(filp, &od->poll, wait);
443 
444 	sysfs_put_active_two(attr_sd);
445 
446 	if (buffer->event != atomic_read(&od->event))
447 		goto trigger;
448 
449 	return 0;
450 
451  trigger:
452 	buffer->needs_read_fill = 1;
453 	return POLLERR|POLLPRI;
454 }
455 
456 void sysfs_notify_dirent(struct sysfs_dirent *sd)
457 {
458 	struct sysfs_open_dirent *od;
459 
460 	spin_lock(&sysfs_open_dirent_lock);
461 
462 	od = sd->s_attr.open;
463 	if (od) {
464 		atomic_inc(&od->event);
465 		wake_up_interruptible(&od->poll);
466 	}
467 
468 	spin_unlock(&sysfs_open_dirent_lock);
469 }
470 EXPORT_SYMBOL_GPL(sysfs_notify_dirent);
471 
472 void sysfs_notify(struct kobject *k, const char *dir, const char *attr)
473 {
474 	struct sysfs_dirent *sd = k->sd;
475 
476 	mutex_lock(&sysfs_mutex);
477 
478 	if (sd && dir)
479 		sd = sysfs_find_dirent(sd, dir);
480 	if (sd && attr)
481 		sd = sysfs_find_dirent(sd, attr);
482 	if (sd)
483 		sysfs_notify_dirent(sd);
484 
485 	mutex_unlock(&sysfs_mutex);
486 }
487 EXPORT_SYMBOL_GPL(sysfs_notify);
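
/*
 * Illustrative sketch (not part of this file): feeding the poll
 * support above.  Whenever the value behind a pollable attribute
 * changes, the provider calls sysfs_notify() (or sysfs_notify_dirent()
 * with a cached dirent); a waiting poll()/select() then reports
 * POLLERR|POLLPRI and userspace re-reads the file from offset 0.
 * The foo_* names are hypothetical.
 */
#if 0	/* example only */
static void foo_set_value(struct foo_obj *foo, int new_value)
{
	foo->value = new_value;

	/* wake anyone polling on the "value" attribute of this kobject */
	sysfs_notify(&foo->kobj, NULL, "value");
}
#endif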
488 
489 const struct file_operations sysfs_file_operations = {
490 	.read		= sysfs_read_file,
491 	.write		= sysfs_write_file,
492 	.llseek		= generic_file_llseek,
493 	.open		= sysfs_open_file,
494 	.release	= sysfs_release,
495 	.poll		= sysfs_poll,
496 };
497 
498 int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
499 			const struct attribute *attr, int type, mode_t amode)
500 {
501 	umode_t mode = (amode & S_IALLUGO) | S_IFREG;
502 	struct sysfs_addrm_cxt acxt;
503 	struct sysfs_dirent *sd;
504 	int rc;
505 
506 	sd = sysfs_new_dirent(attr->name, mode, type);
507 	if (!sd)
508 		return -ENOMEM;
509 	sd->s_attr.attr = (void *)attr;
510 
511 	sysfs_addrm_start(&acxt, dir_sd);
512 	rc = sysfs_add_one(&acxt, sd);
513 	sysfs_addrm_finish(&acxt);
514 
515 	if (rc)
516 		sysfs_put(sd);
517 
518 	return rc;
519 }
520 
521 
522 int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
523 		   int type)
524 {
525 	return sysfs_add_file_mode(dir_sd, attr, type, attr->mode);
526 }
527 
528 
529 /**
530  *	sysfs_create_file - create an attribute file for an object.
531  *	@kobj:	object we're creating for.
532  *	@attr:	attribute descriptor.
533  */
534 
535 int sysfs_create_file(struct kobject * kobj, const struct attribute * attr)
536 {
537 	BUG_ON(!kobj || !kobj->sd || !attr);
538 
539 	return sysfs_add_file(kobj->sd, attr, SYSFS_KOBJ_ATTR);
540 
541 }
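
/*
 * Illustrative sketch (not part of this file): declaring an attribute
 * and creating its file.  The kobject must already be registered so
 * that kobj->sd exists.  The foo_* names are hypothetical; foo_show()
 * and foo_store() are the methods sketched earlier.
 */
#if 0	/* example only */
static struct kobj_attribute foo_attribute =
	__ATTR(value, 0644, foo_show, foo_store);

static int foo_add_sysfs(struct kobject *kobj)
{
	/* creates the "value" file under this kobject's directory */
	return sysfs_create_file(kobj, &foo_attribute.attr);
}
#endif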
542 
543 
544 /**
545  * sysfs_add_file_to_group - add an attribute file to a pre-existing group.
546  * @kobj: object we're acting for.
547  * @attr: attribute descriptor.
548  * @group: group name.
549  */
550 int sysfs_add_file_to_group(struct kobject *kobj,
551 		const struct attribute *attr, const char *group)
552 {
553 	struct sysfs_dirent *dir_sd;
554 	int error;
555 
556 	if (group)
557 		dir_sd = sysfs_get_dirent(kobj->sd, group);
558 	else
559 		dir_sd = sysfs_get(kobj->sd);
560 
561 	if (!dir_sd)
562 		return -ENOENT;
563 
564 	error = sysfs_add_file(dir_sd, attr, SYSFS_KOBJ_ATTR);
565 	sysfs_put(dir_sd);
566 
567 	return error;
568 }
569 EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
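
/*
 * Illustrative sketch (not part of this file): adding one more file to
 * an attribute group created earlier (e.g. with sysfs_create_group()).
 * The group name "parameters" and the foo_* names are hypothetical.
 */
#if 0	/* example only */
static int foo_publish_extra(struct kobject *kobj)
{
	return sysfs_add_file_to_group(kobj, &foo_attribute.attr,
				       "parameters");
}
#endif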
570 
571 /**
572  * sysfs_chmod_file - update the permissions of an object attribute.
573  * @kobj: object we're acting for.
574  * @attr: attribute descriptor.
575  * @mode: file permissions.
576  *
577  */
578 int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
579 {
580 	struct sysfs_dirent *victim_sd = NULL;
581 	struct dentry *victim = NULL;
582 	struct inode * inode;
583 	struct iattr newattrs;
584 	int rc;
585 
586 	rc = -ENOENT;
587 	victim_sd = sysfs_get_dirent(kobj->sd, attr->name);
588 	if (!victim_sd)
589 		goto out;
590 
591 	mutex_lock(&sysfs_rename_mutex);
592 	victim = sysfs_get_dentry(victim_sd);
593 	mutex_unlock(&sysfs_rename_mutex);
594 	if (IS_ERR(victim)) {
595 		rc = PTR_ERR(victim);
596 		victim = NULL;
597 		goto out;
598 	}
599 
600 	inode = victim->d_inode;
601 
602 	mutex_lock(&inode->i_mutex);
603 
604 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
605 	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
606 	newattrs.ia_ctime = current_fs_time(inode->i_sb);
607 	rc = sysfs_setattr(victim, &newattrs);
608 
609 	if (rc == 0) {
610 		fsnotify_change(victim, newattrs.ia_valid);
611 		mutex_lock(&sysfs_mutex);
612 		victim_sd->s_mode = newattrs.ia_mode;
613 		mutex_unlock(&sysfs_mutex);
614 	}
615 
616 	mutex_unlock(&inode->i_mutex);
617  out:
618 	dput(victim);
619 	sysfs_put(victim_sd);
620 	return rc;
621 }
622 EXPORT_SYMBOL_GPL(sysfs_chmod_file);
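
/*
 * Illustrative sketch (not part of this file): tightening an
 * attribute's permissions after creation.  foo_attribute is the
 * hypothetical attribute from the earlier sketches.
 */
#if 0	/* example only */
static int foo_seal(struct kobject *kobj)
{
	/* make "value" read-only once the object has been committed */
	return sysfs_chmod_file(kobj, &foo_attribute.attr, 0444);
}
#endif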
623 
624 
625 /**
626  *	sysfs_remove_file - remove an object attribute.
627  *	@kobj:	object we're acting for.
628  *	@attr:	attribute descriptor.
629  *
630  *	Hash the attribute name and kill the victim.
631  */
632 
633 void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
634 {
635 	sysfs_hash_and_remove(kobj->sd, attr->name);
636 }
637 
638 
639 /**
640  * sysfs_remove_file_from_group - remove an attribute file from a group.
641  * @kobj: object we're acting for.
642  * @attr: attribute descriptor.
643  * @group: group name.
644  */
645 void sysfs_remove_file_from_group(struct kobject *kobj,
646 		const struct attribute *attr, const char *group)
647 {
648 	struct sysfs_dirent *dir_sd;
649 
650 	if (group)
651 		dir_sd = sysfs_get_dirent(kobj->sd, group);
652 	else
653 		dir_sd = sysfs_get(kobj->sd);
654 	if (dir_sd) {
655 		sysfs_hash_and_remove(dir_sd, attr->name);
656 		sysfs_put(dir_sd);
657 	}
658 }
659 EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
660 
661 struct sysfs_schedule_callback_struct {
662 	struct list_head	workq_list;
663 	struct kobject		*kobj;
664 	void			(*func)(void *);
665 	void			*data;
666 	struct module		*owner;
667 	struct work_struct	work;
668 };
669 
670 static DEFINE_MUTEX(sysfs_workq_mutex);
671 static LIST_HEAD(sysfs_workq);
672 static void sysfs_schedule_callback_work(struct work_struct *work)
673 {
674 	struct sysfs_schedule_callback_struct *ss = container_of(work,
675 			struct sysfs_schedule_callback_struct, work);
676 
677 	(ss->func)(ss->data);
678 	kobject_put(ss->kobj);
679 	module_put(ss->owner);
680 	mutex_lock(&sysfs_workq_mutex);
681 	list_del(&ss->workq_list);
682 	mutex_unlock(&sysfs_workq_mutex);
683 	kfree(ss);
684 }
685 
686 /**
687  * sysfs_schedule_callback - helper to schedule a callback for a kobject
688  * @kobj: object we're acting for.
689  * @func: callback function to invoke later.
690  * @data: argument to pass to @func.
691  * @owner: module owning the callback code
692  *
693  * sysfs attribute methods must not unregister themselves or their parent
694  * kobject (which would amount to the same thing).  Attempts to do so will
695  * deadlock, since unregistration is mutually exclusive with driver
696  * callbacks.
697  *
698  * Instead methods can call this routine, which will attempt to allocate
699  * and schedule a workqueue request to call back @func with @data as its
700  * argument in the workqueue's process context.  @kobj will be pinned
701  * until @func returns.
702  *
703  * Returns 0 if the request was submitted, -ENOMEM if storage could not
704  * be allocated, -ENODEV if a reference to @owner isn't available,
705  * -EAGAIN if a callback has already been scheduled for @kobj.
706  */
707 int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
708 		void *data, struct module *owner)
709 {
710 	struct sysfs_schedule_callback_struct *ss, *tmp;
711 
712 	if (!try_module_get(owner))
713 		return -ENODEV;
714 
715 	mutex_lock(&sysfs_workq_mutex);
716 	list_for_each_entry_safe(ss, tmp, &sysfs_workq, workq_list)
717 		if (ss->kobj == kobj) {
718 			mutex_unlock(&sysfs_workq_mutex);
719 			return -EAGAIN;
720 		}
721 	mutex_unlock(&sysfs_workq_mutex);
722 
723 	ss = kmalloc(sizeof(*ss), GFP_KERNEL);
724 	if (!ss) {
725 		module_put(owner);
726 		return -ENOMEM;
727 	}
728 	kobject_get(kobj);
729 	ss->kobj = kobj;
730 	ss->func = func;
731 	ss->data = data;
732 	ss->owner = owner;
733 	INIT_WORK(&ss->work, sysfs_schedule_callback_work);
734 	INIT_LIST_HEAD(&ss->workq_list);
735 	mutex_lock(&sysfs_workq_mutex);
736 	list_add_tail(&ss->workq_list, &sysfs_workq);
737 	mutex_unlock(&sysfs_workq_mutex);
738 	schedule_work(&ss->work);
739 	return 0;
740 }
741 EXPORT_SYMBOL_GPL(sysfs_schedule_callback);
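
/*
 * Illustrative sketch (not part of this file): a store() method that
 * wants to unregister its own object.  Doing that directly would
 * deadlock as described above, so the removal is deferred through
 * sysfs_schedule_callback(); device drivers typically use the
 * device_schedule_callback() wrapper instead.  The foo_* names are
 * hypothetical.
 */
#if 0	/* example only */
static void foo_remove_callback(void *data)
{
	/* runs in workqueue context, where unregistering is safe */
	kobject_del(data);
}

static ssize_t foo_delete_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	int ret;

	ret = sysfs_schedule_callback(kobj, foo_remove_callback, kobj,
				      THIS_MODULE);
	return ret ? ret : count;
}
#endif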
742 
743 
744 EXPORT_SYMBOL_GPL(sysfs_create_file);
745 EXPORT_SYMBOL_GPL(sysfs_remove_file);
746