/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUXKPI_LINUX_INTERRUPT_H_
#define	_LINUXKPI_LINUX_INTERRUPT_H_

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
#include <linux/hardirq.h>

#include <sys/param.h>
#include <sys/interrupt.h>

typedef	irqreturn_t	(*irq_handler_t)(int, void *);

#define	IRQF_SHARED		0x0004	/* Historically */
#define	IRQF_NOBALANCING	0

#define	IRQ_DISABLE_UNLAZY	0

#define	IRQ_NOTCONNECTED	(1U << 31)

int  lkpi_request_irq(struct device *, unsigned int, irq_handler_t,
	irq_handler_t, unsigned long, const char *, void *);
int  lkpi_enable_irq(unsigned int);
void lkpi_disable_irq(unsigned int);
int  lkpi_bind_irq_to_cpu(unsigned int, int);
void lkpi_free_irq(unsigned int, void *);
void lkpi_devm_free_irq(struct device *, unsigned int, void *);

static inline int
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
    const char *name, void *arg)
{

	return (lkpi_request_irq(NULL, irq, handler, NULL, flags, name, arg));
}

static inline int
request_threaded_irq(int irq, irq_handler_t handler,
    irq_handler_t thread_handler, unsigned long flags,
    const char *name, void *arg)
{

	return (lkpi_request_irq(NULL, irq, handler, thread_handler,
	    flags, name, arg));
}

static inline int
devm_request_irq(struct device *dev, int irq,
    irq_handler_t handler, unsigned long flags, const char *name, void *arg)
{

	return (lkpi_request_irq(dev, irq, handler, NULL, flags, name, arg));
}

static inline int
devm_request_threaded_irq(struct device *dev, int irq,
    irq_handler_t handler, irq_handler_t thread_handler,
    unsigned long flags, const char *name, void *arg)
{

	return (lkpi_request_irq(dev, irq, handler, thread_handler,
	    flags, name, arg));
}

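/*
 * Illustrative sketch (not part of this header): a minimal consumer of
 * request_irq()/free_irq() as mapped above onto lkpi_request_irq().  The
 * "foo_softc", "foo_intr" and "foo_attach" names are hypothetical.
 *
 *	struct foo_softc {
 *		unsigned int irq;
 *	};
 *
 *	static irqreturn_t
 *	foo_intr(int irq, void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		(void)sc;
 *		return (IRQ_HANDLED);
 *	}
 *
 *	static int
 *	foo_attach(struct foo_softc *sc)
 *	{
 *		return (request_irq(sc->irq, foo_intr, IRQF_SHARED,
 *		    "foo", sc));
 *	}
 *
 * The handler should return IRQ_NONE when the interrupt was not raised by
 * its device.  Teardown is free_irq(sc->irq, sc); the devm_*() variants
 * instead tie the interrupt's lifetime to the given struct device.
 */
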
static inline int
enable_irq(unsigned int irq)
{
	return (lkpi_enable_irq(irq));
}

static inline void
disable_irq(unsigned int irq)
{
	lkpi_disable_irq(irq);
}

static inline void
disable_irq_nosync(unsigned int irq)
{
	lkpi_disable_irq(irq);
}

static inline int
bind_irq_to_cpu(unsigned int irq, int cpu_id)
{
	return (lkpi_bind_irq_to_cpu(irq, cpu_id));
}

static inline void
free_irq(unsigned int irq, void *device)
{
	lkpi_free_irq(irq, device);
}

static inline void
devm_free_irq(struct device *xdev, unsigned int irq, void *p)
{
	lkpi_devm_free_irq(xdev, irq, p);
}

static inline int
irq_set_affinity_hint(int vector, const cpumask_t *mask)
{
	int error;

	if (mask != NULL)
		error = intr_setaffinity(vector, CPU_WHICH_IRQ, mask);
	else
		error = intr_setaffinity(vector, CPU_WHICH_IRQ, cpuset_root);

	return (-error);
}

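/*
 * Illustrative sketch (not part of this header): pinning an interrupt
 * vector to CPU 0 through the affinity hint.  This assumes, as the use of
 * intr_setaffinity() above implies, that cpumask_t is compatible with the
 * native cpuset_t so the CPU_ZERO()/CPU_SET() macros apply.
 *
 *	cpumask_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	(void)irq_set_affinity_hint(vector, &mask);
 *
 * Passing a NULL mask restores the default (root) CPU set.
 */
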
static inline struct msi_desc *
irq_get_msi_desc(unsigned int irq)
{

	return (lkpi_pci_msi_desc_alloc(irq));
}

static inline void
irq_set_status_flags(unsigned int irq __unused, unsigned long flags __unused)
{
}

/*
 * LinuxKPI tasklet support
 */
struct tasklet_struct;
typedef void tasklet_func_t(unsigned long);
typedef void tasklet_callback_t(struct tasklet_struct *);

struct tasklet_struct {
	TAILQ_ENTRY(tasklet_struct) entry;
	tasklet_func_t *func;
	/* Our "state" implementation is different. Avoid same name as Linux. */
	volatile u_int tasklet_state;
	atomic_t count;
	unsigned long data;
	tasklet_callback_t *callback;
	bool use_callback;
};

#define	DECLARE_TASKLET(_name, _func, _data)	\
struct tasklet_struct _name = { .func = (_func), .data = (_data) }

#define	tasklet_hi_schedule(t)	tasklet_schedule(t)

/* Some other compat code in the tree has this defined as well. */
#define	from_tasklet(_dev, _t, _field)		\
    container_of(_t, typeof(*(_dev)), _field)

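/*
 * Illustrative sketch (not part of this header): the legacy tasklet style
 * covered by DECLARE_TASKLET() above, where the handler receives the
 * "data" word.  "foo_tasklet_fn" and "foo_tasklet" are hypothetical names.
 *
 *	static void
 *	foo_tasklet_fn(unsigned long data)
 *	{
 *		... deferred work, run after the hard interrupt returns ...
 *	}
 *
 *	DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn, 0);
 *
 * An interrupt handler then defers work with tasklet_schedule(&foo_tasklet)
 * (or tasklet_hi_schedule(), which is the same thing here).
 */
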
void tasklet_setup(struct tasklet_struct *, tasklet_callback_t *);
extern void tasklet_schedule(struct tasklet_struct *);
extern void tasklet_kill(struct tasklet_struct *);
extern void tasklet_init(struct tasklet_struct *, tasklet_func_t *,
    unsigned long data);
extern void tasklet_enable(struct tasklet_struct *);
extern void tasklet_disable(struct tasklet_struct *);
extern void tasklet_disable_nosync(struct tasklet_struct *);
extern int tasklet_trylock(struct tasklet_struct *);
extern void tasklet_unlock(struct tasklet_struct *);
extern void tasklet_unlock_wait(struct tasklet_struct *ts);
#define	tasklet_unlock_spin_wait(ts)	tasklet_unlock_wait(ts)

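/*
 * Illustrative sketch (not part of this header): the callback-style
 * interface set up with tasklet_setup() and unpacked with from_tasklet().
 * "foo_softc" and the foo_*() names are hypothetical.
 *
 *	struct foo_softc {
 *		struct tasklet_struct tl;
 *	};
 *
 *	static void
 *	foo_tasklet_cb(struct tasklet_struct *t)
 *	{
 *		struct foo_softc *sc = from_tasklet(sc, t, tl);
 *
 *		... deferred work using sc ...
 *	}
 *
 * Attach: tasklet_setup(&sc->tl, foo_tasklet_cb);
 * Interrupt handler: tasklet_schedule(&sc->tl);
 * Detach: tasklet_kill(&sc->tl);
 */
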
#endif	/* _LINUXKPI_LINUX_INTERRUPT_H_ */