/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/soc/ixp4xx/qmgr.h>

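/*
 * Overview
 *
 * The Queue Manager provides 64 hardware queues backed by on-chip SRAM
 * (128 pages of 16 dwords, see used_sram_bitmap below).  The low queues
 * (0 to HALF_QUEUES - 1) have a selectable interrupt condition; the high
 * queues can only interrupt on "not nearly empty".  Client drivers such as
 * ixp4xx_eth and ixp4xx_hss allocate queues here and move descriptor
 * addresses through them with qmgr_put_entry()/qmgr_get_entry() from
 * <linux/soc/ixp4xx/qmgr.h>.
 *
 * Rough usage sketch (illustrative only; MY_RXQ, my_rx_handler and my_dev
 * are made-up names and error handling is omitted):
 *
 *	err = qmgr_request_queue(MY_RXQ, 128, 0, 0, "demo %s:RX", "dev0");
 *	qmgr_set_irq(MY_RXQ, QUEUE_IRQ_SRC_NOT_EMPTY, my_rx_handler, my_dev);
 *	qmgr_enable_irq(MY_RXQ);
 *	...
 *	qmgr_disable_irq(MY_RXQ);
 *	qmgr_release_queue(MY_RXQ);
 */
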
/* FIXME: get rid of these static assignments */
#define IRQ_IXP4XX_BASE		16
#define IRQ_IXP4XX_QM1		(IRQ_IXP4XX_BASE + 3)
#define IRQ_IXP4XX_QM2		(IRQ_IXP4XX_BASE + 4)

static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif

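/*
 * qmgr_set_irq() - select the IRQ condition for a queue and register a handler
 *
 * For the low queues (0-31) the 3-bit source code @src is written into the
 * queue's nibble of the irqsrc registers (8 queues per register, 4 bits each,
 * top bit reserved).  The high queues (32-63) only support the fixed
 * "not nearly empty" condition, so any other @src is a bug.  @handler is
 * called from the Queue Manager interrupt with @pdev as its argument.
 */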
void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	if (queue < HALF_QUEUES) {
		u32 __iomem *reg;
		int bit;
		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
			     reg);
	} else
		/* IRQ source for queues 32-63 is fixed */
		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}


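/*
 * IRQ1 handler for IXP42x rev. A0 silicon: the irqstat register cannot be
 * relied on (acknowledging it may clear arbitrary bits), so the pending set
 * is rebuilt by checking each enabled low queue against its programmed
 * condition in the irqsrc and stat1 registers.
 */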
static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 en_bitmap, src, stat;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

	en_bitmap = qmgr_regs->irqen[0];
	while (en_bitmap) {
		i = __fls(en_bitmap); /* number of the last "low" queue */
		en_bitmap &= ~BIT(i);
		src = qmgr_regs->irqsrc[i >> 3];
		stat = qmgr_regs->stat1[i >> 3];
		if (src & 4) /* the IRQ condition is inverted */
			stat = ~stat;
		if (stat & BIT(src & 3)) {
			irq_handlers[i](irq_pdevs[i]);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}


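/*
 * IRQ2 handler for IXP42x rev. A0 silicon: the high queues only interrupt on
 * "not nearly empty", so the pending set is simply irqen[1] & statne_h.
 */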
static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 req_bitmap;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

	req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last "high" queue */
		req_bitmap &= ~BIT(i);
		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
		ret = IRQ_HANDLED;
	}
	return ret;
}


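/*
 * Common IRQ handler for non-A0 parts: @irq selects the queue half, the
 * latched per-queue bits are read from irqstat, acknowledged by writing the
 * same mask back, and the registered handler of each pending queue is called.
 */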
static irqreturn_t qmgr_irq(int irq, void *pdev)
{
	int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

	if (!req_bitmap)
		return 0;
	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last queue */
		req_bitmap &= ~BIT(i);
		i += half * HALF_QUEUES;
		irq_handlers[i](irq_pdevs[i]);
	}
	return IRQ_HANDLED;
}


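/* Unmask @queue's interrupt by setting its bit in the relevant irqen half */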
void qmgr_enable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
		     &qmgr_regs->irqen[half]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

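/*
 * Mask @queue's interrupt and clear any interrupt status already latched for
 * it, so a stale event is not delivered when the queue is re-enabled.
 */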
void qmgr_disable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
		     &qmgr_regs->irqen[half]);
	__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

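/*
 * Shift the 128-bit SRAM allocation mask (four u32 words, one bit per
 * 16-dword page) left by one page position.
 */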
static inline void shift_mask(u32 *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

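/*
 * (__)qmgr_request_queue() - configure a queue and reserve SRAM for it
 * @queue: queue number (0 to QUEUES - 1)
 * @len: queue length in dwords (16, 32, 64 or 128)
 * @nearly_empty_watermark, @nearly_full_watermark: 3-bit watermark codes
 *
 * Finds a free run of 16-dword SRAM pages in used_sram_bitmap and writes the
 * queue's configuration word (size, watermarks, base address) to its SRAM
 * config register.  Returns 0 on success, -EINVAL for bad parameters,
 * -EBUSY if the queue is already configured, -ENOMEM if no contiguous SRAM
 * region is free and -ENODEV if the module reference cannot be taken.
 * With DEBUG_QMGR the extra arguments build a human-readable description
 * used in debug messages.
 */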
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char *name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	switch (len) {
	case  16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case  32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case  64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16;		/* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for queue %i\n",
			       queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}

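/*
 * qmgr_release_queue() - free a queue set up with qmgr_request_queue()
 *
 * Recomputes the queue's SRAM page mask from its config word, drains (and
 * reports) any entries still queued, clears the config register, returns the
 * pages to used_sram_bitmap and removes the IRQ handler so that late
 * interrupts are caught as bugs.
 */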
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;

	BUG_ON(!addr);		/* not requested */

	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif

	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);
}

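/*
 * Module init: claims the Queue Manager MMIO region, resets the status,
 * interrupt and SRAM configuration registers, installs the two interrupt
 * handlers (using the IXP42x rev. A0 workaround variants when needed) and
 * marks the first four SRAM pages, reserved for configuration, as used.
 */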
static int qmgr_init(void)
{
	int i, err;
	irq_handler_t handler1, handler2;

	/* This driver does not work with device tree */
	if (of_have_populated_dt())
		return -ENODEV;

	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
				     IXP4XX_QMGR_REGION_SIZE,
				     "IXP4xx Queue Manager");
	if (mem_res == NULL)
		return -EBUSY;

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);

	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	if (cpu_is_ixp42x_rev_a0()) {
		handler1 = qmgr_irq1_a0;
		handler2 = qmgr_irq2_a0;
	} else
		handler1 = handler2 = qmgr_irq;

	err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM1, err);
		goto error_irq;
	}

	err = request_irq(IRQ_IXP4XX_QM2, handler2, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM2, err);
		goto error_irq2;
	}

	used_sram_bitmap[0] = 0xF; /* first 4 pages reserved for config */
	spin_lock_init(&qmgr_lock);

	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
	return 0;

error_irq2:
	free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	return err;
}

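/* Module exit: release both interrupt lines and the MMIO region */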
static void qmgr_remove(void)
{
	free_irq(IRQ_IXP4XX_QM1, NULL);
	free_irq(IRQ_IXP4XX_QM2, NULL);
	synchronize_irq(IRQ_IXP4XX_QM1);
	synchronize_irq(IRQ_IXP4XX_QM2);
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}

module_init(qmgr_init);
module_exit(qmgr_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);