// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 * Copyright (C) 2016 Christoph Hellwig.
 */
#include <linux/bitfield.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>

#include "../pci.h"
#include "msi.h"

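/* Cleared by pci_no_msi(), e.g. for the "pci=nomsi" kernel parameter */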
int pci_msi_enable = 1;
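/* Set by platforms (e.g. Xen) that handle MSI masking themselves */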
int pci_msi_ignore_mask;

/**
 * pci_msi_supported - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 *
 * Look at global flags, the device itself, and its parent buses
 * to determine whether MSI/-X are supported for the device. If MSI/-X is
 * supported, return 1, else return 0.
 **/
static int pci_msi_supported(struct pci_dev *dev, int nvec)
{
        struct pci_bus *bus;

        /* MSI must be globally enabled and supported by the device */
        if (!pci_msi_enable)
                return 0;

        if (!dev || dev->no_msi)
                return 0;

        /*
         * You can't ask to have fewer than one MSI configured:
         * a) it makes no sense;
         * b) the list manipulation code assumes nvec >= 1.
         */
        if (nvec < 1)
                return 0;

        /*
         * Any bridge which does NOT route MSI transactions from its
         * secondary bus to its primary bus must set NO_MSI flag on
         * the secondary pci_bus.
         *
         * The NO_MSI flag can either be set directly by:
         * - arch-specific PCI host bus controller drivers (deprecated)
         * - quirks for specific PCI bridges
         *
         * or indirectly by platform-specific PCI host bridge drivers by
         * advertising the 'msi_domain' property, which results in
         * the NO_MSI flag when no MSI domain is found for this bridge
         * at probe time.
         */
        for (bus = dev->bus; bus; bus = bus->parent)
                if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
                        return 0;

        return 1;
}

static void pcim_msi_release(void *pcidev)
{
        struct pci_dev *dev = pcidev;

        dev->is_msi_managed = false;
        pci_free_irq_vectors(dev);
}

/*
 * Needs to be separate from pcim_release to prevent an ordering problem
 * vs. msi_device_data_release() in the MSI core code.
 */
static int pcim_setup_msi_release(struct pci_dev *dev)
{
        int ret;

        if (!pci_is_managed(dev) || dev->is_msi_managed)
                return 0;

        ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
        if (ret)
                return ret;

        dev->is_msi_managed = true;
        return 0;
}

/*
 * Ordering vs. devres: msi device data has to be installed first so that
 * pcim_msi_release() is invoked before it on device release.
 */
static int pci_setup_msi_context(struct pci_dev *dev)
{
        int ret = msi_setup_device_data(&dev->dev);

        if (ret)
                return ret;

        return pcim_setup_msi_release(dev);
}

/*
 * Helper functions for mask/unmask and MSI message handling
 */

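/*
 * pci_msi_update_mask() takes a clear/set bit pair: bits in @clear are
 * removed from the cached mask, bits in @set are added, and the result is
 * written back to the MSI mask register. The __pci_msi_mask_desc() and
 * __pci_msi_unmask_desc() wrappers in msi.h pass (0, mask) and (mask, 0)
 * respectively.
 */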
void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
{
        raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock;
        unsigned long flags;

        if (!desc->pci.msi_attrib.can_mask)
                return;

        raw_spin_lock_irqsave(lock, flags);
        desc->pci.msi_mask &= ~clear;
        desc->pci.msi_mask |= set;
        pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos,
                               desc->pci.msi_mask);
        raw_spin_unlock_irqrestore(lock, flags);
}

/**
 * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
 * @data: pointer to irqdata associated with that interrupt
 */
void pci_msi_mask_irq(struct irq_data *data)
{
        struct msi_desc *desc = irq_data_get_msi_desc(data);

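        /*
         * Multi-MSI allocates consecutive Linux IRQ numbers, so the
         * offset from the first IRQ selects this vector's mask bit.
         */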
        __pci_msi_mask_desc(desc, BIT(data->irq - desc->irq));
}
EXPORT_SYMBOL_GPL(pci_msi_mask_irq);

/**
 * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts
 * @data: pointer to irqdata associated with that interrupt
 */
void pci_msi_unmask_irq(struct irq_data *data)
{
        struct msi_desc *desc = irq_data_get_msi_desc(data);

        __pci_msi_unmask_desc(desc, BIT(data->irq - desc->irq));
}
EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);

void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
        struct pci_dev *dev = msi_desc_to_pci_dev(entry);

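        /* The message registers are only accessible while the device is in D0 */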
        BUG_ON(dev->current_state != PCI_D0);

        if (entry->pci.msi_attrib.is_msix) {
                void __iomem *base = pci_msix_desc_addr(entry);

                if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual))
                        return;

                msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
                msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
                msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
        } else {
                int pos = dev->msi_cap;
                u16 data;

                pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
                                      &msg->address_lo);
                if (entry->pci.msi_attrib.is_64) {
                        pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
                                              &msg->address_hi);
                        pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
                } else {
                        msg->address_hi = 0;
                        pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
                }
                msg->data = data;
        }
}

static inline void pci_write_msg_msi(struct pci_dev *dev, struct msi_desc *desc,
                                     struct msi_msg *msg)
{
        int pos = dev->msi_cap;
        u16 msgctl;

        pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
        msgctl &= ~PCI_MSI_FLAGS_QSIZE;
        msgctl |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, desc->pci.msi_attrib.multiple);
        pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

        pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, msg->address_lo);
        if (desc->pci.msi_attrib.is_64) {
                pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, msg->address_hi);
                pci_write_config_word(dev, pos + PCI_MSI_DATA_64, msg->data);
        } else {
                pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data);
        }
        /* Ensure that the writes are visible in the device */
        pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
}

static inline void pci_write_msg_msix(struct msi_desc *desc, struct msi_msg *msg)
{
        void __iomem *base = pci_msix_desc_addr(desc);
        u32 ctrl = desc->pci.msix_ctrl;
        bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);

        if (desc->pci.msi_attrib.is_virtual)
                return;
        /*
         * The specification mandates that the entry is masked
         * when the message is modified:
         *
         * "If software changes the Address or Data value of an
         * entry while the entry is unmasked, the result is
         * undefined."
         */
        if (unmasked)
                pci_msix_write_vector_ctrl(desc, ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT);

        writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
        writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
        writel(msg->data, base + PCI_MSIX_ENTRY_DATA);

        if (unmasked)
                pci_msix_write_vector_ctrl(desc, ctrl);

        /* Ensure that the writes are visible in the device */
        readl(base + PCI_MSIX_ENTRY_DATA);
}

void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
        struct pci_dev *dev = msi_desc_to_pci_dev(entry);

        if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
                /* Don't touch the hardware now */
        } else if (entry->pci.msi_attrib.is_msix) {
                pci_write_msg_msix(entry, msg);
        } else {
                pci_write_msg_msi(dev, entry, msg);
        }

        entry->msg = *msg;

        if (entry->write_msi_msg)
                entry->write_msi_msg(entry, entry->write_msi_msg_data);
}

void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
        struct msi_desc *entry = irq_get_msi_desc(irq);

        __pci_write_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(pci_write_msi_msg);


/* PCI/MSI specific functionality */

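/*
 * PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG marks quirky devices on which the
 * INTx disable bit also blocks MSI delivery; leave INTx alone there.
 */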
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
        if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
                pci_intx(dev, enable);
}

static void pci_msi_set_enable(struct pci_dev *dev, int enable)
{
        u16 control;

        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
        control &= ~PCI_MSI_FLAGS_ENABLE;
        if (enable)
                control |= PCI_MSI_FLAGS_ENABLE;
        pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
                              struct irq_affinity_desc *masks)
{
        struct msi_desc desc;
        u16 control;

        /* MSI Entry Initialization */
        memset(&desc, 0, sizeof(desc));

        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
        /* Lies, damned lies, and MSIs */
        if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
                control |= PCI_MSI_FLAGS_MASKBIT;
        /* Respect XEN's mask disabling */
        if (pci_msi_ignore_mask)
                control &= ~PCI_MSI_FLAGS_MASKBIT;

        desc.nvec_used = nvec;
        desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
        desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT);
        desc.pci.msi_attrib.default_irq = dev->irq;
        desc.pci.msi_attrib.multi_cap = FIELD_GET(PCI_MSI_FLAGS_QMASK, control);
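        /*
         * The MME field encodes log2 of the vector count; e.g. nvec == 3
         * is rounded up to 4 vectors and stored as multiple == 2.
         */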
        desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
        desc.affinity = masks;

        if (control & PCI_MSI_FLAGS_64BIT)
                desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
        else
                desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

        /* Save the initial mask status */
        if (desc.pci.msi_attrib.can_mask)
                pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);

        return msi_insert_msi_desc(&dev->dev, &desc);
}

static int msi_verify_entries(struct pci_dev *dev)
{
        struct msi_desc *entry;

        if (!dev->no_64bit_msi)
                return 0;

        msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
                if (entry->msg.address_hi) {
                        pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
                                entry->msg.address_hi, entry->msg.address_lo);
                        break;
                }
        }
        return !entry ? 0 : -EIO;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: number of interrupts to allocate
 * @affd: description of automatic IRQ affinity assignments (may be %NULL)
 *
 * Set up the MSI capability structure of the device with the requested
 * number of interrupts. A return value of zero indicates the successful
 * setup of an entry with the new MSI IRQ. A negative return value indicates
 * an error, and a positive return value indicates the number of interrupts
 * which could have been allocated.
 */
static int msi_capability_init(struct pci_dev *dev, int nvec,
                               struct irq_affinity *affd)
{
        struct irq_affinity_desc *masks = NULL;
        struct msi_desc *entry, desc;
        int ret;

        /* Reject multi-MSI early on irq domain enabled architectures */
        if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
                return 1;

        /*
         * Disable MSI during setup in the hardware, but mark it enabled
         * so that setup code can evaluate it.
         */
        pci_msi_set_enable(dev, 0);
        dev->msi_enabled = 1;

        if (affd)
                masks = irq_create_affinity_masks(nvec, affd);

        msi_lock_descs(&dev->dev);
        ret = msi_setup_msi_desc(dev, nvec, masks);
        if (ret)
                goto fail;

        /* All MSIs are unmasked by default; mask them all */
        entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
        pci_msi_mask(entry, msi_multi_mask(entry));
        /*
         * Copy the MSI descriptor for the error path because
         * pci_msi_setup_msi_irqs() will free it for the hierarchical
         * interrupt domain case.
         */
        memcpy(&desc, entry, sizeof(desc));

        /* Configure MSI capability structure */
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
        if (ret)
                goto err;

        ret = msi_verify_entries(dev);
        if (ret)
                goto err;

        /* Set MSI enabled bits */
        pci_intx_for_msi(dev, 0);
        pci_msi_set_enable(dev, 1);

        pcibios_free_irq(dev);
        dev->irq = entry->irq;
        goto unlock;

err:
        pci_msi_unmask(&desc, msi_multi_mask(&desc));
        pci_free_msi_irqs(dev);
fail:
        dev->msi_enabled = 0;
unlock:
        msi_unlock_descs(&dev->dev);
        kfree(masks);
        return ret;
}

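/*
 * Illustrative call path (hypothetical driver code): drivers normally
 * get here via pci_alloc_irq_vectors():
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSI);
 *	if (nvec < 0)
 *		return nvec;
 *	irq = pci_irq_vector(pdev, 0);	 (Linux IRQ of vector 0)
 *
 * which lands in __pci_enable_msi_range() below, retrying
 * msi_capability_init() with smaller vector counts until one succeeds
 * or minvec can no longer be satisfied.
 */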
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
                           struct irq_affinity *affd)
{
        int nvec;
        int rc;

        if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
                return -EINVAL;

        /* Check whether driver already requested MSI-X IRQs */
        if (dev->msix_enabled) {
                pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
                return -EINVAL;
        }

        if (maxvec < minvec)
                return -ERANGE;

        if (WARN_ON_ONCE(dev->msi_enabled))
                return -EINVAL;

        nvec = pci_msi_vec_count(dev);
        if (nvec < 0)
                return nvec;
        if (nvec < minvec)
                return -ENOSPC;

        if (nvec > maxvec)
                nvec = maxvec;

        rc = pci_setup_msi_context(dev);
        if (rc)
                return rc;

        if (!pci_setup_msi_device_domain(dev))
                return -ENODEV;

        for (;;) {
                if (affd) {
                        nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
                        if (nvec < minvec)
                                return -ENOSPC;
                }

                rc = msi_capability_init(dev, nvec, affd);
                if (rc == 0)
                        return nvec;

                if (rc < 0)
                        return rc;
                if (rc < minvec)
                        return -ENOSPC;

                nvec = rc;
        }
}

/**
 * pci_msi_vec_count - Return the number of MSI vectors a device can send
 * @dev: device to report about
 *
 * This function returns the number of MSI vectors a device requested via
 * its Multiple Message Capable register. It returns a negative errno if the
 * device is not capable of sending MSI interrupts. Otherwise, the call
 * succeeds and returns a power of two, up to a maximum of 2^5 (32),
 * according to the MSI specification.
 **/
int pci_msi_vec_count(struct pci_dev *dev)
{
        int ret;
        u16 msgctl;

        if (!dev->msi_cap)
                return -EINVAL;

        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
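        /* MMC is a log2 encoding: e.g. a field value of 3 means 2^3 = 8 vectors */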
        ret = 1 << FIELD_GET(PCI_MSI_FLAGS_QMASK, msgctl);

        return ret;
}
EXPORT_SYMBOL(pci_msi_vec_count);

/*
 * Architecture override returns true when the PCI MSI message should be
 * written by the generic restore function.
 */
bool __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
        return true;
}

void __pci_restore_msi_state(struct pci_dev *dev)
{
        struct msi_desc *entry;
        u16 control;

        if (!dev->msi_enabled)
                return;

        entry = irq_get_msi_desc(dev->irq);

        pci_intx_for_msi(dev, 0);
        pci_msi_set_enable(dev, 0);
        if (arch_restore_msi_irqs(dev))
                __pci_write_msi_msg(entry, &entry->msg);

        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
        pci_msi_update_mask(entry, 0, 0);
        control &= ~PCI_MSI_FLAGS_QSIZE;
        control |= PCI_MSI_FLAGS_ENABLE |
                   FIELD_PREP(PCI_MSI_FLAGS_QSIZE, entry->pci.msi_attrib.multiple);
        pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

void pci_msi_shutdown(struct pci_dev *dev)
{
        struct msi_desc *desc;

        if (!pci_msi_enable || !dev || !dev->msi_enabled)
                return;

        pci_msi_set_enable(dev, 0);
        pci_intx_for_msi(dev, 1);
        dev->msi_enabled = 0;

        /* Return the device with MSI unmasked as its initial state */
        desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
        if (!WARN_ON_ONCE(!desc))
                pci_msi_unmask(desc, msi_multi_mask(desc));

        /* Restore dev->irq to its default pin-assertion IRQ */
        dev->irq = desc->pci.msi_attrib.default_irq;
        pcibios_alloc_irq(dev);
}

/* PCI/MSI-X specific functionality */

static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
{
        u16 ctrl;

        pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
        ctrl &= ~clear;
        ctrl |= set;
        pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}

static void __iomem *msix_map_region(struct pci_dev *dev,
                                     unsigned int nr_entries)
{
        resource_size_t phys_addr;
        u32 table_offset;
        unsigned long flags;
        u8 bir;

        pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
                              &table_offset);
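        /*
         * The low 3 bits (BIR) select the BAR holding the table; the
         * remainder is the byte offset into it. E.g. (illustrative) a
         * register value of 0x2003 means offset 0x2000 within BAR 3.
         */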
        bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
        flags = pci_resource_flags(dev, bir);
        if (!flags || (flags & IORESOURCE_UNSET))
                return NULL;

        table_offset &= PCI_MSIX_TABLE_OFFSET;
        phys_addr = pci_resource_start(dev, bir) + table_offset;

        return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}

/**
 * msix_prepare_msi_desc - Prepare a half-initialized MSI descriptor for operation
 * @dev: The PCI device for which the descriptor is prepared
 * @desc: The MSI descriptor for preparation
 *
 * This is separate from msix_setup_msi_descs() below to handle dynamic
 * allocations for MSI-X after initial enablement.
 *
 * Ideally the whole MSI-X setup would work that way, but there is no way to
 * support this for the legacy arch_setup_msi_irqs() mechanism and for the
 * fake irq domains like the x86 XEN one. Sigh...
 *
 * The descriptor is zeroed and only @desc::msi_index and @desc::affinity
 * are set. When called from msix_setup_msi_descs(), the is_virtual
 * attribute is initialized as well.
 *
 * Fill in the rest.
 */
void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
{
        desc->nvec_used = 1;
        desc->pci.msi_attrib.is_msix = 1;
        desc->pci.msi_attrib.is_64 = 1;
        desc->pci.msi_attrib.default_irq = dev->irq;
        desc->pci.mask_base = dev->msix_base;
        desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
                                        !desc->pci.msi_attrib.is_virtual;

        if (desc->pci.msi_attrib.can_mask) {
                void __iomem *addr = pci_msix_desc_addr(desc);

                desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
        }
}

static int msix_setup_msi_descs(struct pci_dev *dev, struct msix_entry *entries,
                                int nvec, struct irq_affinity_desc *masks)
{
        int ret = 0, i, vec_count = pci_msix_vec_count(dev);
        struct irq_affinity_desc *curmsk;
        struct msi_desc desc;

        memset(&desc, 0, sizeof(desc));

        for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
                desc.msi_index = entries ? entries[i].entry : i;
                desc.affinity = masks ? curmsk : NULL;
                desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;

                msix_prepare_msi_desc(dev, &desc);

                ret = msi_insert_msi_desc(&dev->dev, &desc);
                if (ret)
                        break;
        }
        return ret;
}

static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
        struct msi_desc *desc;

        if (entries) {
                msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
                        entries->vector = desc->irq;
                        entries++;
                }
        }
}

static void msix_mask_all(void __iomem *base, int tsize)
{
        u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
        int i;

        if (pci_msi_ignore_mask)
                return;

        for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
                writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}

static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
                                 int nvec, struct irq_affinity *affd)
{
        struct irq_affinity_desc *masks = NULL;
        int ret;

        if (affd)
                masks = irq_create_affinity_masks(nvec, affd);

        msi_lock_descs(&dev->dev);
        ret = msix_setup_msi_descs(dev, entries, nvec, masks);
        if (ret)
                goto out_free;

        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
        if (ret)
                goto out_free;

        /* Check if all MSI entries honor device restrictions */
        ret = msi_verify_entries(dev);
        if (ret)
                goto out_free;

        msix_update_entries(dev, entries);
        goto out_unlock;

out_free:
        pci_free_msi_irqs(dev);
out_unlock:
        msi_unlock_descs(&dev->dev);
        kfree(masks);
        return ret;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 * @affd: Optional pointer to enable automatic affinity assignment
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X IRQs. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated IRQs; a non-zero
 * return indicates an error.
 **/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                                int nvec, struct irq_affinity *affd)
{
        int ret, tsize;
        u16 control;

        /*
         * Some devices require MSI-X to be enabled before the MSI-X
         * registers can be accessed. Mask all the vectors to prevent
         * interrupts coming in before they're fully set up.
         */
        pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
                                    PCI_MSIX_FLAGS_ENABLE);

        /* Mark it enabled so setup functions can query it */
        dev->msix_enabled = 1;

        pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
        /* Request & Map MSI-X table region */
        tsize = msix_table_size(control);
        dev->msix_base = msix_map_region(dev, tsize);
        if (!dev->msix_base) {
                ret = -ENOMEM;
                goto out_disable;
        }

        ret = msix_setup_interrupts(dev, entries, nvec, affd);
        if (ret)
                goto out_disable;

        /* Disable INTx */
        pci_intx_for_msi(dev, 0);

        /*
         * Ensure that all table entries are masked to prevent
         * stale entries from firing in a crash kernel.
         *
         * Done late to deal with a broken Marvell NVME device
         * which takes the MSI-X mask bits into account even
         * when MSI-X is disabled, which prevents MSI delivery.
         */
        msix_mask_all(dev->msix_base, tsize);
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);

        pcibios_free_irq(dev);
        return 0;

out_disable:
        dev->msix_enabled = 0;
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);

        return ret;
}

static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
        bool nogap;
        int i, j;

        if (!entries)
                return true;

        nogap = pci_msi_domain_supports(dev, MSI_FLAG_MSIX_CONTIGUOUS, DENY_LEGACY);

        for (i = 0; i < nvec; i++) {
                /* Check for duplicate entries */
                for (j = i + 1; j < nvec; j++) {
                        if (entries[i].entry == entries[j].entry)
                                return false;
                }
                /* Check for unsupported gaps */
                if (nogap && entries[i].entry != i)
                        return false;
        }
        return true;
}

int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec,
                            int maxvec, struct irq_affinity *affd, int flags)
{
        int hwsize, rc, nvec = maxvec;

        if (maxvec < minvec)
                return -ERANGE;

        if (dev->msi_enabled) {
                pci_info(dev, "can't enable MSI-X (MSI already enabled)\n");
                return -EINVAL;
        }

        if (WARN_ON_ONCE(dev->msix_enabled))
                return -EINVAL;

        /* Check MSI-X early on irq domain enabled architectures */
        if (!pci_msi_domain_supports(dev, MSI_FLAG_PCI_MSIX, ALLOW_LEGACY))
                return -ENOTSUPP;

        if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
                return -EINVAL;

        hwsize = pci_msix_vec_count(dev);
        if (hwsize < 0)
                return hwsize;

        if (!pci_msix_validate_entries(dev, entries, nvec))
                return -EINVAL;

        if (hwsize < nvec) {
                /* Keep the IRQ virtual hackery working */
                if (flags & PCI_IRQ_VIRTUAL)
                        hwsize = nvec;
                else
                        nvec = hwsize;
        }

        if (nvec < minvec)
                return -ENOSPC;

        rc = pci_setup_msi_context(dev);
        if (rc)
                return rc;

        if (!pci_setup_msix_device_domain(dev, hwsize))
                return -ENODEV;

        for (;;) {
                if (affd) {
                        nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
                        if (nvec < minvec)
                                return -ENOSPC;
                }

                rc = msix_capability_init(dev, entries, nvec, affd);
                if (rc == 0)
                        return nvec;

                if (rc < 0)
                        return rc;
                if (rc < minvec)
                        return -ENOSPC;

                nvec = rc;
        }
}
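
/*
 * Illustrative caller (hypothetical driver code), via the exported
 * pci_enable_msix_range() wrapper:
 *
 *	struct msix_entry entries[] = {
 *		{ .entry = 0 }, { .entry = 1 }, { .entry = 2 },
 *	};
 *	int nvec = pci_enable_msix_range(pdev, entries, 2, 3);
 *
 * On success, nvec is the number of vectors allocated and each
 * entries[i].vector holds the corresponding Linux IRQ number.
 */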

void __pci_restore_msix_state(struct pci_dev *dev)
{
        struct msi_desc *entry;
        bool write_msg;

        if (!dev->msix_enabled)
                return;

        /* route the table */
        pci_intx_for_msi(dev, 0);
        pci_msix_clear_and_set_ctrl(dev, 0,
                                    PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

        write_msg = arch_restore_msi_irqs(dev);

        msi_lock_descs(&dev->dev);
        msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
                if (write_msg)
                        __pci_write_msi_msg(entry, &entry->msg);
                pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
        }
        msi_unlock_descs(&dev->dev);

        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}

void pci_msix_shutdown(struct pci_dev *dev)
{
        struct msi_desc *desc;

        if (!pci_msi_enable || !dev || !dev->msix_enabled)
                return;

        if (pci_dev_is_disconnected(dev)) {
                dev->msix_enabled = 0;
                return;
        }

        /* Return the device with MSI-X masked as its initial state */
        msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
                pci_msix_mask(desc);

        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
        pci_intx_for_msi(dev, 1);
        dev->msix_enabled = 0;
        pcibios_alloc_irq(dev);
}

/* Common interfaces */

void pci_free_msi_irqs(struct pci_dev *dev)
{
        pci_msi_teardown_msi_irqs(dev);

        if (dev->msix_base) {
                iounmap(dev->msix_base);
                dev->msix_base = NULL;
        }
}

/* Misc. infrastructure */

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
        return to_pci_dev(desc->dev);
}
EXPORT_SYMBOL(msi_desc_to_pci_dev);

void pci_no_msi(void)
{
        pci_msi_enable = 0;
}