xref: /linux/sound/core/misc.c (revision 1fe93b2a2ace9bba2cb90920f9300834e537665c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Misc and compatibility things
4  *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5  */
6 
7 #include <linux/init.h>
8 #include <linux/export.h>
9 #include <linux/moduleparam.h>
10 #include <linux/time.h>
11 #include <linux/slab.h>
12 #include <linux/ioport.h>
13 #include <linux/fs.h>
14 #include <sound/core.h>
15 
/*
 * release_and_free_resource - release an I/O resource and free its descriptor
 * @res: the resource to drop; a NULL pointer is silently ignored
 *
 * Note: the NULL check is required here -- unlike kfree(),
 * release_resource() must not be called with a NULL argument.
 */
void release_and_free_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}
EXPORT_SYMBOL(release_and_free_resource);
24 
25 #ifdef CONFIG_PCI
26 #include <linux/pci.h>
27 /**
28  * snd_pci_quirk_lookup_id - look up a PCI SSID quirk list
29  * @vendor: PCI SSV id
30  * @device: PCI SSD id
31  * @list: quirk list, terminated by a null entry
32  *
33  * Look through the given quirk list and finds a matching entry
34  * with the same PCI SSID.  When subdevice is 0, all subdevice
35  * values may match.
36  *
37  * Returns the matched entry pointer, or NULL if nothing matched.
38  */
39 const struct snd_pci_quirk *
snd_pci_quirk_lookup_id(u16 vendor,u16 device,const struct snd_pci_quirk * list)40 snd_pci_quirk_lookup_id(u16 vendor, u16 device,
41 			const struct snd_pci_quirk *list)
42 {
43 	const struct snd_pci_quirk *q;
44 
45 	for (q = list; q->subvendor || q->subdevice; q++) {
46 		if (q->subvendor != vendor)
47 			continue;
48 		if (!q->subdevice ||
49 		    (device & q->subdevice_mask) == q->subdevice)
50 			return q;
51 	}
52 	return NULL;
53 }
54 EXPORT_SYMBOL(snd_pci_quirk_lookup_id);
55 
56 /**
57  * snd_pci_quirk_lookup - look up a PCI SSID quirk list
58  * @pci: pci_dev handle
59  * @list: quirk list, terminated by a null entry
60  *
61  * Look through the given quirk list and finds a matching entry
62  * with the same PCI SSID.  When subdevice is 0, all subdevice
63  * values may match.
64  *
65  * Returns the matched entry pointer, or NULL if nothing matched.
66  */
67 const struct snd_pci_quirk *
snd_pci_quirk_lookup(struct pci_dev * pci,const struct snd_pci_quirk * list)68 snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list)
69 {
70 	if (!pci)
71 		return NULL;
72 	return snd_pci_quirk_lookup_id(pci->subsystem_vendor,
73 				       pci->subsystem_device,
74 				       list);
75 }
76 EXPORT_SYMBOL(snd_pci_quirk_lookup);
77 #endif
78 
79 /*
80  * Deferred async signal helpers
81  *
82  * Below are a few helper functions to wrap the async signal handling
83  * in the deferred work.  The main purpose is to avoid the messy deadlock
84  * around tasklist_lock and co at the kill_fasync() invocation.
85  * fasync_helper() and kill_fasync() are replaced with snd_fasync_helper()
86  * and snd_kill_fasync(), respectively.  In addition, snd_fasync_free() has
87  * to be called at releasing the relevant file object.
88  */
/* wrapper around a fasync_struct whose kill_fasync() is run deferred */
struct snd_fasync {
	struct fasync_struct *fasync;	/* the wrapped fasync instance */
	int signal;			/* signal argument for the pending kill_fasync() */
	int poll;			/* poll band argument for the pending kill_fasync() */
	int on;				/* last "on" value passed to snd_fasync_helper() */
	struct list_head list;		/* link in snd_fasync_list while a kill is pending */
};
96 
/* protects snd_fasync_list and the fields of each queued snd_fasync */
static DEFINE_SPINLOCK(snd_fasync_lock);
/* snd_fasync entries waiting for the deferred kill_fasync() call */
static LIST_HEAD(snd_fasync_list);
99 
/*
 * Work handler: performs the actual kill_fasync() calls for every entry
 * queued on snd_fasync_list by snd_kill_fasync().
 */
static void snd_fasync_work_fn(struct work_struct *work)
{
	struct snd_fasync *fasync;
	int signal, poll;

	spin_lock_irq(&snd_fasync_lock);
	while (!list_empty(&snd_fasync_list)) {
		fasync = list_first_entry(&snd_fasync_list, struct snd_fasync, list);
		list_del_init(&fasync->list);
		/* skip entries switched off after they were queued */
		if (!fasync->on)
			continue;
		/* snapshot the arguments while still holding the lock */
		signal = fasync->signal;
		poll = fasync->poll;
		/*
		 * Drop the lock around kill_fasync(); calling it under
		 * snd_fasync_lock would defeat the purpose of this
		 * deferral (see the comment block above).
		 */
		spin_unlock_irq(&snd_fasync_lock);
		kill_fasync(&fasync->fasync, signal, poll);
		spin_lock_irq(&snd_fasync_lock);
	}
	spin_unlock_irq(&snd_fasync_lock);
}

/* single global work item draining snd_fasync_list */
static DECLARE_WORK(snd_fasync_work, snd_fasync_work_fn);
121 
/*
 * snd_fasync_helper - deferred-safe replacement for fasync_helper()
 * @fd: file descriptor, passed through to fasync_helper()
 * @file: file object, passed through to fasync_helper()
 * @on: non-zero to enable async notification, zero to disable
 * @fasyncp: pointer to the snd_fasync handle; allocated here on first
 *	enable, and must be released later with snd_fasync_free()
 *
 * Returns 0 or a negative error code (-ENOMEM on allocation failure,
 * otherwise the return value of fasync_helper()).
 */
int snd_fasync_helper(int fd, struct file *file, int on,
		      struct snd_fasync **fasyncp)
{
	struct snd_fasync *fasync = NULL;

	if (on) {
		/* pre-allocate outside the spinlock; GFP_KERNEL may sleep */
		fasync = kzalloc(sizeof(*fasync), GFP_KERNEL);
		if (!fasync)
			return -ENOMEM;
		INIT_LIST_HEAD(&fasync->list);
	}

	scoped_guard(spinlock_irq, &snd_fasync_lock) {
		if (*fasyncp) {
			/* already have an instance; discard the spare */
			kfree(fasync);
			fasync = *fasyncp;
		} else {
			/* disabling an entry that was never enabled: no-op */
			if (!fasync)
				return 0;
			*fasyncp = fasync;
		}
		fasync->on = on;
	}
	return fasync_helper(fd, file, on, &fasync->fasync);
}
EXPORT_SYMBOL_GPL(snd_fasync_helper);
148 
/*
 * snd_kill_fasync - deferred-safe replacement for kill_fasync()
 * @fasync: handle set up via snd_fasync_helper(); NULL is ignored
 * @signal: signal to send, stored for the deferred kill_fasync()
 * @poll: poll band, stored for the deferred kill_fasync()
 *
 * Instead of invoking kill_fasync() directly, queue the entry and let
 * the work item call it later (see the comment block above).
 */
void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll)
{
	if (!fasync || !fasync->on)
		return;
	guard(spinlock_irqsave)(&snd_fasync_lock);
	fasync->signal = signal;
	fasync->poll = poll;
	/* list_move() re-queues safely even if the entry is already listed */
	list_move(&fasync->list, &snd_fasync_list);
	schedule_work(&snd_fasync_work);
}
EXPORT_SYMBOL_GPL(snd_kill_fasync);
160 
/*
 * snd_fasync_free - release a snd_fasync handle
 * @fasync: handle allocated by snd_fasync_helper(); NULL is ignored
 *
 * Unlinks the entry from the pending list, then waits for the deferred
 * work to finish before freeing, so the work handler cannot touch the
 * entry after it is gone.  Must be called when the associated file
 * object is released.
 */
void snd_fasync_free(struct snd_fasync *fasync)
{
	if (!fasync)
		return;

	scoped_guard(spinlock_irq, &snd_fasync_lock)
		list_del_init(&fasync->list);

	/* make sure snd_fasync_work_fn() is done with this entry */
	flush_work(&snd_fasync_work);
	kfree(fasync);
}
EXPORT_SYMBOL_GPL(snd_fasync_free);
173