// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "zpci: " fmt

#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/smp.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/tpi.h>

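/*
 * Interrupt delivery mode, selected in zpci_irq_init(): DIRECTED if the
 * machine supports CPU directed interrupts (sclp.has_dirq) and floating
 * delivery was not forced via s390_pci_force_floating, FLOATING otherwise.
 */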
static enum {FLOATING, DIRECTED} irq_delivery;

/*
 * summary bit vector
 * FLOATING - summary bit per function
 * DIRECTED - summary bit per cpu (only used in fallback path)
 */
static struct airq_iv *zpci_sbv;

/*
 * interrupt bit vectors
 * FLOATING - interrupt bit vector per function
 * DIRECTED - interrupt bit vector per cpu
 */
static struct airq_iv **zpci_ibv;

/* Modify PCI: Register floating adapter interruptions */
static int zpci_set_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib fib = {0};
	u8 status;

	fib.fmt0.isc = PCI_ISC;
	fib.fmt0.sum = 1;	/* enable summary notifications */
	fib.fmt0.noi = airq_iv_end(zdev->aibv);
	fib.fmt0.aibv = virt_to_phys(zdev->aibv->vector);
	fib.fmt0.aibvo = 0;	/* each zdev has its own interrupt vector */
	fib.fmt0.aisb = virt_to_phys(zpci_sbv->vector) + (zdev->aisb / 64) * 8;
	fib.fmt0.aisbo = zdev->aisb & 63;
	fib.gd = zdev->gisa;

	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}

/* Modify PCI: Unregister floating adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3 || (cc == 1 && status == 24))
		/* Function already gone or IRQs already deregistered. */
		cc = 0;

	return cc ? -EIO : 0;
}

/* Modify PCI: Register CPU directed interruptions */
static int zpci_set_directed_irq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT_D);
	struct zpci_fib fib = {0};
	u8 status;

	fib.fmt = 1;
	fib.fmt1.noi = zdev->msi_nr_irqs;
	fib.fmt1.dibvo = zdev->msi_first_bit;
	fib.gd = zdev->gisa;

	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}

/* Modify PCI: Unregister CPU directed interruptions */
static int zpci_clear_directed_irq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT_D);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.fmt = 1;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3 || (cc == 1 && status == 24))
		/* Function already gone or IRQs already deregistered. */
		cc = 0;

	return cc ? -EIO : 0;
}

/* Register adapter interruptions */
int zpci_set_irq(struct zpci_dev *zdev)
{
	int rc;

	if (irq_delivery == DIRECTED)
		rc = zpci_set_directed_irq(zdev);
	else
		rc = zpci_set_airq(zdev);

	return rc;
}

/* Clear adapter interruptions */
static int zpci_clear_irq(struct zpci_dev *zdev)
{
	int rc;

	if (irq_delivery == DIRECTED)
		rc = zpci_clear_directed_irq(zdev);
	else
		rc = zpci_clear_airq(zdev);

	return rc;
}

static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
				 bool force)
{
	irq_data_update_affinity(data, dest);
	return IRQ_SET_MASK_OK;
}

/*
 * Encode the hwirq number for the parent domain. The encoding must be unique
 * for each IRQ of each device in the parent domain, so it uses the devfn to
 * identify the device and the msi_index to identify the IRQ within that device.
 */
static inline u32 zpci_encode_hwirq(u8 devfn, u16 msi_index)
{
	return (devfn << 16) | msi_index;
}

static inline u16 zpci_decode_hwirq_msi_index(irq_hw_number_t hwirq)
{
	return hwirq & 0xffff;
}
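
/*
 * Example: devfn 0x08 with msi_index 2 encodes to hwirq 0x00080002;
 * zpci_decode_hwirq_msi_index() recovers the low 16 bits, i.e. 2.
 */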

static void zpci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);
	struct zpci_dev *zdev = to_zpci_dev(desc->dev);

	if (irq_delivery == DIRECTED) {
		int cpu = cpumask_first(irq_data_get_affinity_mask(data));

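		/*
		 * Target one CPU: keep bits 0-7 and 24-31 of the MSI address
		 * and insert the CPU address of the first CPU in the affinity
		 * mask into bits 8-23.
		 */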
		msg->address_lo = zdev->msi_addr & 0xff0000ff;
		msg->address_lo |= (smp_cpu_get_cpu_address(cpu) << 8);
	} else {
		msg->address_lo = zdev->msi_addr & 0xffffffff;
	}
	msg->address_hi = zdev->msi_addr >> 32;
	msg->data = zpci_decode_hwirq_msi_index(data->hwirq);
}

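/*
 * .irq_set_affinity is wired up to zpci_set_irq_affinity() in
 * zpci_directed_irq_init(); with floating delivery, affinity is not
 * supported (MSI_FLAG_NO_AFFINITY is set instead).
 */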
static struct irq_chip zpci_irq_chip = {
	.name = "PCI-MSI",
	.irq_compose_msi_msg = zpci_compose_msi_msg,
};

static void zpci_handle_cpu_local_irq(bool rescan)
{
	struct airq_iv *dibv = zpci_ibv[smp_processor_id()];
	union zpci_sic_iib iib = {{0}};
	struct irq_domain *msi_domain;
	irq_hw_number_t hwirq;
	unsigned long bit;
	int irqs_on = 0;

	for (bit = 0;;) {
		/* Scan the directed IRQ bit vector */
		bit = airq_iv_scan(dibv, bit, airq_iv_end(dibv));
		if (bit == -1UL) {
			if (!rescan || irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, re-enable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &iib))
				break;
			bit = 0;
			continue;
		}
		inc_irq_stat(IRQIO_MSI);
		hwirq = airq_iv_get_data(dibv, bit);
		msi_domain = (struct irq_domain *)airq_iv_get_ptr(dibv, bit);
		generic_handle_domain_irq(msi_domain, hwirq);
	}
}

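/*
 * Per-CPU state for the directed-IRQ fallback path. The scheduled counter
 * coalesces wakeups: only the 0 -> 1 transition queues the CSD, and
 * zpci_handle_remote_irq() keeps rescanning until the counter drains to zero.
 */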
struct cpu_irq_data {
	call_single_data_t csd;
	atomic_t scheduled;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data, irq_data);

static void zpci_handle_remote_irq(void *data)
{
	atomic_t *scheduled = data;

	do {
		zpci_handle_cpu_local_irq(false);
	} while (atomic_dec_return(scheduled));
}

static void zpci_handle_fallback_irq(void)
{
	struct cpu_irq_data *cpu_data;
	union zpci_sic_iib iib = {{0}};
	unsigned long cpu;
	int irqs_on = 0;

	for (cpu = 0;;) {
		cpu = airq_iv_scan(zpci_sbv, cpu, airq_iv_end(zpci_sbv));
		if (cpu == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, re-enable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
				break;
			cpu = 0;
			continue;
		}
		cpu_data = &per_cpu(irq_data, cpu);
		if (atomic_inc_return(&cpu_data->scheduled) > 1)
			continue;

		INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled);
		smp_call_function_single_async(cpu, &cpu_data->csd);
	}
}

static void zpci_directed_irq_handler(struct airq_struct *airq,
				      struct tpi_info *tpi_info)
{
	bool floating = !tpi_info->directed_irq;

	if (floating) {
		inc_irq_stat(IRQIO_PCF);
		zpci_handle_fallback_irq();
	} else {
		inc_irq_stat(IRQIO_PCD);
		zpci_handle_cpu_local_irq(true);
	}
}

static void zpci_floating_irq_handler(struct airq_struct *airq,
				      struct tpi_info *tpi_info)
{
	union zpci_sic_iib iib = {{0}};
	struct irq_domain *msi_domain;
	irq_hw_number_t hwirq;
	unsigned long si, ai;
	struct airq_iv *aibv;
	int irqs_on = 0;

	inc_irq_stat(IRQIO_PCF);
	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(zpci_sbv, si, airq_iv_end(zpci_sbv));
		if (si == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, re-enable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
				break;
			si = 0;
			continue;
		}

		/* Scan the adapter interrupt vector for this device. */
		aibv = zpci_ibv[si];
		for (ai = 0;;) {
			ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
			if (ai == -1UL)
				break;
			inc_irq_stat(IRQIO_MSI);
			airq_iv_lock(aibv, ai);
			hwirq = airq_iv_get_data(aibv, ai);
			msi_domain = (struct irq_domain *)airq_iv_get_ptr(aibv, ai);
			generic_handle_domain_irq(msi_domain, hwirq);
			airq_iv_unlock(aibv, ai);
		}
	}
}

static int __alloc_airq(struct zpci_dev *zdev, int msi_vecs,
			unsigned long *bit)
{
	if (irq_delivery == DIRECTED) {
		/* Allocate cpu vector bits */
		*bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
		if (*bit == -1UL)
			return -EIO;
	} else {
		/* Allocate adapter summary indicator bit */
		*bit = airq_iv_alloc_bit(zpci_sbv);
		if (*bit == -1UL)
			return -EIO;
		zdev->aisb = *bit;

		/* Create adapter interrupt vector */
		zdev->aibv = airq_iv_create(msi_vecs,
					    AIRQ_IV_PTR | AIRQ_IV_DATA | AIRQ_IV_BITLOCK,
					    NULL);
		if (!zdev->aibv)
			return -ENOMEM;

		/* Wire up shortcut pointer */
		zpci_ibv[*bit] = zdev->aibv;
		/* Each function has its own interrupt vector */
		*bit = 0;
	}
	return 0;
}

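/*
 * Invoked when the PCI core restores a device's MSI state (for example
 * after device recovery); re-registering the interrupts is all that is
 * needed here.
 */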
bool arch_restore_msi_irqs(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_set_irq(zdev);
	return true;
}

static struct airq_struct zpci_airq = {
	.handler = zpci_floating_irq_handler,
	.isc = PCI_ISC,
};

static void zpci_msi_teardown_directed(struct zpci_dev *zdev)
{
	airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->max_msi);
	zdev->msi_first_bit = -1U;
	zdev->msi_nr_irqs = 0;
}

static void zpci_msi_teardown_floating(struct zpci_dev *zdev)
{
	airq_iv_release(zdev->aibv);
	zdev->aibv = NULL;
	airq_iv_free_bit(zpci_sbv, zdev->aisb);
	zdev->aisb = -1UL;
	zdev->msi_first_bit = -1U;
	zdev->msi_nr_irqs = 0;
}

static void zpci_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *arg)
{
	struct zpci_dev *zdev = to_zpci_dev(domain->dev);

	zpci_clear_irq(zdev);
	if (irq_delivery == DIRECTED)
		zpci_msi_teardown_directed(zdev);
	else
		zpci_msi_teardown_floating(zdev);
}

static int zpci_msi_prepare(struct irq_domain *domain,
			    struct device *dev, int nvec,
			    msi_alloc_info_t *info)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long bit;
	int msi_vecs, rc;

	msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
	if (msi_vecs < nvec) {
		pr_info("%s requested %d IRQs, allocate system limit of %d\n",
			pci_name(pdev), nvec, zdev->max_msi);
	}

	rc = __alloc_airq(zdev, msi_vecs, &bit);
	if (rc) {
		pr_err("Allocating adapter IRQs for %s failed\n", pci_name(pdev));
		return rc;
	}

	zdev->msi_first_bit = bit;
	zdev->msi_nr_irqs = msi_vecs;
	rc = zpci_set_irq(zdev);
	if (rc) {
		pr_err("Registering adapter IRQs for %s failed\n",
		       pci_name(pdev));

		if (irq_delivery == DIRECTED)
			zpci_msi_teardown_directed(zdev);
		else
			zpci_msi_teardown_floating(zdev);
		return rc;
	}
	return 0;
}

static int zpci_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct msi_desc *desc = ((msi_alloc_info_t *)args)->desc;
	struct zpci_dev *zdev = to_zpci_dev(desc->dev);
	struct zpci_bus *zbus = zdev->zbus;
	unsigned int cpu, hwirq;
	unsigned long bit;
	int i;

	bit = zdev->msi_first_bit + desc->msi_index;
	hwirq = zpci_encode_hwirq(zdev->devfn, desc->msi_index);

	if (desc->msi_index + nr_irqs > zdev->max_msi)
		return -EINVAL;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &zpci_irq_chip, zdev,
				    handle_percpu_irq, NULL, NULL);

		if (irq_delivery == DIRECTED) {
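			/*
			 * The IRQ may be retargeted to any CPU via affinity,
			 * so mirror the lookup data into every per-CPU
			 * interrupt bit vector.
			 */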
			for_each_possible_cpu(cpu) {
				airq_iv_set_ptr(zpci_ibv[cpu], bit + i,
						(unsigned long)zbus->msi_parent_domain);
				airq_iv_set_data(zpci_ibv[cpu], bit + i, hwirq + i);
			}
		} else {
			airq_iv_set_ptr(zdev->aibv, bit + i,
					(unsigned long)zbus->msi_parent_domain);
			airq_iv_set_data(zdev->aibv, bit + i, hwirq + i);
		}
	}

	return 0;
}

static void zpci_msi_clear_airq(struct irq_data *d, int i)
{
	struct msi_desc *desc = irq_data_get_msi_desc(d);
	struct zpci_dev *zdev = to_zpci_dev(desc->dev);
	unsigned long bit;
	unsigned int cpu;
	u16 msi_index;

	msi_index = zpci_decode_hwirq_msi_index(d->hwirq);
	bit = zdev->msi_first_bit + msi_index;

	if (irq_delivery == DIRECTED) {
		for_each_possible_cpu(cpu) {
			airq_iv_set_ptr(zpci_ibv[cpu], bit + i, 0);
			airq_iv_set_data(zpci_ibv[cpu], bit + i, 0);
		}
	} else {
		airq_iv_set_ptr(zdev->aibv, bit + i, 0);
		airq_iv_set_data(zdev->aibv, bit + i, 0);
	}
}

static void zpci_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	struct irq_data *d;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		d = irq_domain_get_irq_data(domain, virq + i);
		zpci_msi_clear_airq(d, i);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops zpci_msi_domain_ops = {
	.alloc = zpci_msi_domain_alloc,
	.free  = zpci_msi_domain_free,
};

static bool zpci_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				   struct irq_domain *real_parent,
				   struct msi_domain_info *info)
{
	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
		return false;

	info->ops->msi_prepare = zpci_msi_prepare;
	info->ops->msi_teardown = zpci_msi_teardown;

	return true;
}

static struct msi_parent_ops zpci_msi_parent_ops = {
	.supported_flags   = MSI_GENERIC_FLAGS_MASK	|
			     MSI_FLAG_PCI_MSIX		|
			     MSI_FLAG_MULTI_PCI_MSI,
	.required_flags	   = MSI_FLAG_USE_DEF_DOM_OPS  |
			     MSI_FLAG_USE_DEF_CHIP_OPS,
	.init_dev_msi_info = zpci_init_dev_msi_info,
};

int zpci_create_parent_msi_domain(struct zpci_bus *zbus)
{
	char fwnode_name[18];

	snprintf(fwnode_name, sizeof(fwnode_name), "ZPCI_MSI_DOM_%04x", zbus->domain_nr);
	struct irq_domain_info info = {
		.fwnode		= irq_domain_alloc_named_fwnode(fwnode_name),
		.ops		= &zpci_msi_domain_ops,
	};

	if (!info.fwnode) {
		pr_err("Failed to allocate fwnode for MSI IRQ domain\n");
		return -ENOMEM;
	}

	if (irq_delivery == FLOATING)
		zpci_msi_parent_ops.required_flags |= MSI_FLAG_NO_AFFINITY;

	zbus->msi_parent_domain = msi_create_parent_irq_domain(&info, &zpci_msi_parent_ops);
	if (!zbus->msi_parent_domain) {
		irq_domain_free_fwnode(info.fwnode);
		pr_err("Failed to create MSI IRQ domain\n");
		return -ENOMEM;
	}

	return 0;
}

void zpci_remove_parent_msi_domain(struct zpci_bus *zbus)
{
	struct fwnode_handle *fn;

	fn = zbus->msi_parent_domain->fwnode;
	irq_domain_remove(zbus->msi_parent_domain);
	irq_domain_free_fwnode(fn);
}

static void __init cpu_enable_directed_irq(void *unused)
{
	union zpci_sic_iib iib = {{0}};
	union zpci_sic_iib ziib = {{0}};

	iib.cdiib.dibv_addr = virt_to_phys(zpci_ibv[smp_processor_id()]->vector);

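	/*
	 * Register this CPU's directed interrupt bit vector, then enable
	 * directed delivery (with suppression after one IRQ, as in the
	 * floating setup) for the PCI ISC on this CPU.
	 */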
	zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
	zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);
}

static int __init zpci_directed_irq_init(void)
{
	union zpci_sic_iib iib = {{0}};
	unsigned int cpu;

	zpci_sbv = airq_iv_create(num_possible_cpus(), 0, NULL);
	if (!zpci_sbv)
		return -ENOMEM;

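	/*
	 * Hand the directed interrupt summary bit vector (one summary bit
	 * per possible CPU, used by the fallback path) to the machine.
	 */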
	iib.diib.isc = PCI_ISC;
	iib.diib.nr_cpus = num_possible_cpus();
	iib.diib.disb_addr = virt_to_phys(zpci_sbv->vector);
	zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);

	zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
			   GFP_KERNEL);
	if (!zpci_ibv)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/*
		 * Per CPU IRQ vectors look the same but bit-allocation
		 * is only done on the first vector.
		 */
		zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
					       AIRQ_IV_PTR |
					       AIRQ_IV_DATA |
					       AIRQ_IV_CACHELINE |
					       (!cpu ? AIRQ_IV_ALLOC : 0), NULL);
		if (!zpci_ibv[cpu])
			return -ENOMEM;
	}
	on_each_cpu(cpu_enable_directed_irq, NULL, 1);

	zpci_irq_chip.irq_set_affinity = zpci_set_irq_affinity;

	return 0;
}

static int __init zpci_floating_irq_init(void)
{
	zpci_ibv = kcalloc(ZPCI_NR_DEVICES, sizeof(*zpci_ibv), GFP_KERNEL);
	if (!zpci_ibv)
		return -ENOMEM;

	zpci_sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
	if (!zpci_sbv)
		goto out_free;

	return 0;

out_free:
	kfree(zpci_ibv);
	return -ENOMEM;
}

int __init zpci_irq_init(void)
{
	union zpci_sic_iib iib = {{0}};
	int rc;

	irq_delivery = sclp.has_dirq ? DIRECTED : FLOATING;
	if (s390_pci_force_floating)
		irq_delivery = FLOATING;

	if (irq_delivery == DIRECTED)
		zpci_airq.handler = zpci_directed_irq_handler;

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out;
	/* Set summary to 1 to be called every time for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	switch (irq_delivery) {
	case FLOATING:
		rc = zpci_floating_irq_init();
		break;
	case DIRECTED:
		rc = zpci_directed_irq_init();
		break;
	}

	if (rc)
		goto out_airq;

	/*
	 * Enable floating IRQs (with suppression after one IRQ). When using
	 * directed IRQs this enables the fallback path.
	 */
	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib);

	return 0;
out_airq:
	unregister_adapter_interrupt(&zpci_airq);
out:
	return rc;
}

void __init zpci_irq_exit(void)
{
	unsigned int cpu;

	if (irq_delivery == DIRECTED) {
		for_each_possible_cpu(cpu) {
			airq_iv_release(zpci_ibv[cpu]);
		}
	}
	kfree(zpci_ibv);
	if (zpci_sbv)
		airq_iv_release(zpci_sbv);
	unregister_adapter_interrupt(&zpci_airq);
}