xref: /linux/arch/sparc/kernel/pci_msi.c (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
// SPDX-License-Identifier: GPL-2.0
/* pci_msi.c: Sparc64 MSI support common layer.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include "pci_impl.h"

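/* MSI event queue (MSIQ) interrupt handler.  Read the queue's head
 * pointer, dequeue every pending MSI, dispatch each one to the Linux
 * IRQ recorded in pbm->msi_irq_table, then write the head back so the
 * hardware can reuse the entries.  The get_head/dequeue_msi/set_head
 * ops abstract the underlying implementation (the Fire and sun4v PCI
 * back ends each supply their own sparc64_msiq_ops).
 */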
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
	struct sparc64_msiq_cookie *msiq_cookie = cookie;
	struct pci_pbm_info *pbm = msiq_cookie->pbm;
	unsigned long msiqid = msiq_cookie->msiqid;
	const struct sparc64_msiq_ops *ops;
	unsigned long orig_head, head;
	int err;

	ops = pbm->msi_ops;

	err = ops->get_head(pbm, msiqid, &head);
	if (unlikely(err < 0))
		goto err_get_head;

	orig_head = head;
	for (;;) {
		unsigned long msi;

		err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
		if (likely(err > 0)) {
			unsigned int irq;

			irq = pbm->msi_irq_table[msi - pbm->msi_first];
			generic_handle_irq(irq);
		}

		if (unlikely(err < 0))
			goto err_dequeue;

		if (err == 0)
			break;
	}
	if (likely(head != orig_head)) {
		err = ops->set_head(pbm, msiqid, head);
		if (unlikely(err < 0))
			goto err_set_head;
	}
	return IRQ_HANDLED;

err_get_head:
	printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
	       msiqid, err);
	goto err_out;

err_dequeue:
	printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_set_head:
	printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_out:
	return IRQ_NONE;
}

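/* Round-robin pick of an MSI event queue for a newly allocated MSI.
 * A single rotor, protected by a local spinlock, spreads MSIs across
 * the pbm->msiq_num queues available to this PBM.
 */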
static u32 pick_msiq(struct pci_pbm_info *pbm)
{
	static DEFINE_SPINLOCK(rotor_lock);
	unsigned long flags;
	u32 ret, rotor;

	spin_lock_irqsave(&rotor_lock, flags);

	rotor = pbm->msiq_rotor;
	ret = pbm->msiq_first + rotor;

	if (++rotor >= pbm->msiq_num)
		rotor = 0;
	pbm->msiq_rotor = rotor;

	spin_unlock_irqrestore(&rotor_lock, flags);

	return ret;
}


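/* MSI number allocator: first-fit scan of the per-PBM bitmap.  Bit i
 * corresponds to MSI number (pbm->msi_first + i).
 */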
static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}

static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}

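/* irq_chip used for the per-device MSI virtual IRQs.  Masking is
 * delegated to the generic PCI MSI helpers, which flip the mask bits
 * in the device's MSI/MSI-X capability.
 */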
static struct irq_chip msi_irq = {
	.name		= "PCI-MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	/* XXX affinity XXX */
};

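/* Set up one MSI for @pdev: allocate a virtual IRQ, reserve an MSI
 * number, bind it to a round-robin-selected event queue, and program
 * the device's MSI message (address inside the PBM's 32-bit or 64-bit
 * MSI window, data = the MSI number).
 */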
static int sparc64_setup_msi_irq(unsigned int *irq_p,
				 struct pci_dev *pdev,
				 struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	struct msi_msg msg;
	int msi, err;
	u32 msiqid;

	*irq_p = irq_alloc(0, 0);
	err = -ENOMEM;
	if (!*irq_p)
		goto out_err;

	irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq,
				      "MSI");

	err = alloc_msi(pbm);
	if (unlikely(err < 0))
		goto out_irq_free;

	msi = err;

	msiqid = pick_msiq(pbm);

	err = ops->msi_setup(pbm, msiqid, msi,
			     (entry->pci.msi_attrib.is_64 ? 1 : 0));
	if (err)
		goto out_msi_free;

	pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;

	if (entry->pci.msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi;

	irq_set_msi_desc(*irq_p, entry);
	pci_write_msi_msg(*irq_p, &msg);

	return 0;

out_msi_free:
	free_msi(pbm, msi);

out_irq_free:
	irq_set_chip(*irq_p, NULL);
	irq_free(*irq_p);
	*irq_p = 0;

out_err:
	return err;
}

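/* Undo sparc64_setup_msi_irq(): look up which MSI number maps to this
 * virtual IRQ, tear down the hardware mapping, and release the MSI
 * number and the virtual IRQ.
 */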
static void sparc64_teardown_msi_irq(unsigned int irq,
				     struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	unsigned int msi_num;
	int i, err;

	for (i = 0; i < pbm->msi_num; i++) {
		if (pbm->msi_irq_table[i] == irq)
			break;
	}
	if (i >= pbm->msi_num) {
		pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name,
			irq);
		return;
	}

	msi_num = pbm->msi_first + i;
	pbm->msi_irq_table[i] = ~0U;

	err = ops->msi_teardown(pbm, msi_num);
	if (err) {
		pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, "
			"irq %u, gives error %d\n", pbm->name, msi_num, irq,
			err);
		return;
	}

	free_msi(pbm, msi_num);

	irq_set_chip(irq, NULL);
	irq_free(irq);
}

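/* Allocate the MSI-number bitmap, rounded up to a whole number of
 * unsigned longs so the bit operations above stay within the buffer.
 */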
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}

static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}

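/* Allocate the per-queue interrupt cookies (the pbm/msiqid pairs handed
 * to the MSIQ handler) and the MSI -> virtual-IRQ translation table.
 */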
static int msi_table_alloc(struct pci_pbm_info *pbm)
{
	int size, i;

	size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
	pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
	if (!pbm->msiq_irq_cookies)
		return -ENOMEM;

	for (i = 0; i < pbm->msiq_num; i++) {
		struct sparc64_msiq_cookie *p;

		p = &pbm->msiq_irq_cookies[i];
		p->pbm = pbm;
		p->msiqid = pbm->msiq_first + i;
	}

	size = pbm->msi_num * sizeof(unsigned int);
	pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_irq_table) {
		kfree(pbm->msiq_irq_cookies);
		pbm->msiq_irq_cookies = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void msi_table_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msiq_irq_cookies);
	pbm->msiq_irq_cookies = NULL;

	kfree(pbm->msi_irq_table);
	pbm->msi_irq_table = NULL;
}

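/* Build and request the interrupt for one event queue.  When the PBM
 * has a known NUMA node, steer the queue's interrupt affinity to the
 * CPUs of that node.
 */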
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
				 const struct sparc64_msiq_ops *ops,
				 unsigned long msiqid,
				 unsigned long devino)
{
	int irq = ops->msiq_build_irq(pbm, msiqid, devino);
	int err, nid;

	if (irq < 0)
		return irq;

	nid = pbm->numa_node;
	if (nid != -1) {
		cpumask_t numa_mask;

		cpumask_copy(&numa_mask, cpumask_of_node(nid));
		irq_set_affinity(irq, &numa_mask);
	}
	err = request_irq(irq, sparc64_msiq_interrupt, 0,
			  "MSIQ",
			  &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
	if (err)
		return err;

	return 0;
}

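/* Bring up every event queue of the PBM; any failure makes the caller
 * fall back to running without MSI.
 */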
static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
				      const struct sparc64_msiq_ops *ops)
{
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = i + pbm->msiq_first;
		unsigned long devino = i + pbm->msiq_first_devino;
		int err;

		err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
		if (err)
			return err;
	}

	return 0;
}

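/* Probe the firmware properties that describe this PBM's MSI support
 * (#msi-eqs, msi-eq-size, msi-eq-to-devino, #msi, msi-ranges,
 * msi-data-mask, msix-data-width, msi-address-ranges), allocate the
 * bookkeeping structures, and bring the event queues up.  On any
 * failure the PBM is left with MSI disabled.
 */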
void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
			  const struct sparc64_msiq_ops *ops)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->op->dev.of_node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->op->dev.of_node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->op->dev.of_node,
				      "msi-eq-to-devino", &len);
		if (!mqp)
			mqp = of_get_property(pbm->op->dev.of_node,
					      "msi-eq-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->op->dev.of_node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->op->dev.of_node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->op->dev.of_node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->op->dev.of_node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->op->dev.of_node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_table_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (ops->msiq_alloc(pbm)) {
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (sparc64_bringup_msi_queues(pbm, ops)) {
			ops->msiq_free(pbm);
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%llx:0x%x] "
		       "addr64[0x%llx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
		       pbm->name,
		       __pa(pbm->msi_queues));

		pbm->msi_ops = ops;
		pbm->setup_msi_irq = sparc64_setup_msi_irq;
		pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
	}
	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}
448