/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

/*
 * This header file contains MSI data structures and functions which are
 * only relevant for:
 *	- Interrupt core code
 *	- PCI/MSI core code
 *	- MSI interrupt domain implementations
 *	- IOMMU, low level VFIO, NTB and other justified exceptions
 *	  dealing with low level MSI details.
 *
 * Regular device drivers have no business with any of these functions and
 * especially storing MSI descriptor pointers in random code is considered
 * abuse.
 *
 * Device driver relevant functions are available in <linux/msi_api.h>
 */

#include <linux/irqdomain_defs.h>
#include <linux/cpumask_types.h>
#include <linux/msi_api.h>
#include <linux/irq.h>

#include <asm/msi.h>

/* Dummy shadow structures if an architecture does not define them */
#ifndef arch_msi_msg_addr_lo
typedef struct arch_msi_msg_addr_lo {
	u32	address_lo;
} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
#endif

#ifndef arch_msi_msg_addr_hi
typedef struct arch_msi_msg_addr_hi {
	u32	address_hi;
} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
#endif

#ifndef arch_msi_msg_data
typedef struct arch_msi_msg_data {
	u32	data;
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif

#ifndef arch_is_isolated_msi
#define arch_is_isolated_msi() false
#endif

/**
 * msi_msg - Representation of a MSI message
 * @address_lo:		Low 32 bits of msi message address
 * @arch_addr_lo:	Architecture specific shadow of @address_lo
 * @address_hi:		High 32 bits of msi message address
 *			(only used when device supports it)
 * @arch_addr_hi:	Architecture specific shadow of @address_hi
 * @data:		MSI message data (usually 16 bits)
 * @arch_data:		Architecture specific shadow of @data
 */
struct msi_msg {
	union {
		u32			address_lo;
		arch_msi_msg_addr_lo_t	arch_addr_lo;
	};
	union {
		u32			address_hi;
		arch_msi_msg_addr_hi_t	arch_addr_hi;
	};
	union {
		u32			data;
		arch_msi_msg_data_t	arch_data;
	};
};

extern int pci_msi_ignore_mask;
/* Helper functions */
struct msi_desc;
struct pci_dev;
struct device_attribute;
struct irq_domain;
struct irq_affinity_desc;

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif
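
/*
 * Example (illustrative sketch, not kernel code): code which needs the last
 * programmed MSI message for a Linux interrupt number can retrieve the
 * cached copy instead of reading it back from the device:
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(irq, &msg);
 *	// msg.address_lo, msg.address_hi and msg.data hold the cached message
 */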

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * pci_msi_desc - PCI/MSI specific MSI descriptor data
 *
 * @msi_mask:	[PCI MSI]   MSI cached mask bits
 * @msix_ctrl:	[PCI MSI-X] MSI-X cached per vector control bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @can_mask:	[PCI MSI/X] Masking supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI-X] True if the vector is not backed by a hardware
 *		MSI-X table entry (virtual vector)
 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 */
struct pci_msi_desc {
	union {
		u32	msi_mask;
		u32	msix_ctrl;
	};
	struct {
		u8	is_msix		: 1;
		u8	multiple	: 3;
		u8	multi_cap	: 3;
		u8	can_mask	: 1;
		u8	is_64		: 1;
		u8	is_virtual	: 1;
		unsigned default_irq;
	} msi_attrib;
	union {
		u8	mask_pos;
		void __iomem *mask_base;
	};
};

/**
 * union msi_domain_cookie - Opaque MSI domain specific data
 * @value:	u64 value store
 * @ptr:	Pointer to domain specific data
 * @iobase:	Domain specific IOmem pointer
 *
 * The content of this data is implementation defined and used by the MSI
 * domain to store domain specific information which is required for
 * interrupt chip callbacks.
 */
union msi_domain_cookie {
	u64	value;
	void	*ptr;
	void	__iomem *iobase;
};

/**
 * struct msi_desc_data - Generic MSI descriptor data
 * @dcookie:	Cookie for MSI domain specific data which is required
 *		for irq_chip callbacks
 * @icookie:	Cookie for the MSI interrupt instance provided by
 *		the usage site to the allocation function
 *
 * The content of this data is implementation defined, e.g. PCI/IMS
 * implementations define the meaning of the data. The MSI core ignores
 * this data completely.
 */
struct msi_desc_data {
	union msi_domain_cookie		dcookie;
	union msi_instance_cookie	icookie;
};

#define MSI_MAX_INDEX		((unsigned int)USHRT_MAX)

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @iommu_cookie: Optional cookie used by the IOMMU code (CONFIG_IRQ_MSI_IOMMU only)
 * @sysfs_attrs: Pointer to sysfs device attributes
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @msi_index:	Index of the msi descriptor
 * @pci:	PCI specific msi descriptor data
 * @data:	Generic MSI descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif
#ifdef CONFIG_SYSFS
	struct device_attribute		*sysfs_attrs;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	u16				msi_index;
	union {
		struct pci_msi_desc	pci;
		struct msi_desc_data	data;
	};
};

/*
 * Filter values for the MSI descriptor iterators and accessor functions.
 */
enum msi_desc_filter {
	/* All descriptors */
	MSI_DESC_ALL,
	/* Descriptors which have no interrupt associated */
	MSI_DESC_NOTASSOCIATED,
	/* Descriptors which have an interrupt associated */
	MSI_DESC_ASSOCIATED,
};


/**
 * struct msi_dev_domain - The internals of MSI domain info per device
 * @store:	Xarray for storing MSI descriptor pointers
 * @domain:	Pointer to a per device interrupt domain
 */
struct msi_dev_domain {
	struct xarray		store;
	struct irq_domain	*domain;
};

int msi_setup_device_data(struct device *dev);

void msi_lock_descs(struct device *dev);
void msi_unlock_descs(struct device *dev);

struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
				       enum msi_desc_filter filter);

/**
 * msi_first_desc - Get the first MSI descriptor of the default irqdomain
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
static inline struct msi_desc *msi_first_desc(struct device *dev,
					      enum msi_desc_filter filter)
{
	return msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, filter);
}

struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
			       enum msi_desc_filter filter);

/**
 * msi_domain_for_each_desc - Iterate the MSI descriptors in a specific domain
 *
 * @desc:	struct msi_desc pointer used as iterator
 * @dev:	struct device pointer - device to iterate
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Filter for descriptor selection
 *
 * Notes:
 *  - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
 *    pair.
 *  - It is safe to remove a retrieved MSI descriptor in the loop.
 */
#define msi_domain_for_each_desc(desc, dev, domid, filter)			\
	for ((desc) = msi_domain_first_desc((dev), (domid), (filter)); (desc);	\
	     (desc) = msi_next_desc((dev), (domid), (filter)))

/**
 * msi_for_each_desc - Iterate the MSI descriptors in the default irqdomain
 *
 * @desc:	struct msi_desc pointer used as iterator
 * @dev:	struct device pointer - device to iterate
 * @filter:	Filter for descriptor selection
 *
 * Notes:
 *  - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
 *    pair.
 *  - It is safe to remove a retrieved MSI descriptor in the loop.
 */
#define msi_for_each_desc(desc, dev, filter)					\
	msi_domain_for_each_desc((desc), (dev), MSI_DEFAULT_DOMAIN, (filter))
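
/*
 * Example (illustrative sketch, not kernel code): walking the allocated
 * descriptors of a device. The msi_lock_descs()/msi_unlock_descs() pair is
 * mandatory as documented above:
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
 *		pr_debug("MSI index %u mapped to Linux irq %u\n",
 *			 desc->msi_index, desc->irq);
 *	}
 *	msi_unlock_descs(dev);
 */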

#define msi_desc_to_dev(desc)		((desc)->dev)

#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif

int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
			       struct msi_desc *init_desc);
/**
 * msi_insert_msi_desc - Allocate and initialize a MSI descriptor in the
 *			 default irqdomain and insert it at @init_desc->msi_index
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
static inline int msi_insert_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	return msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, init_desc);
}
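
/*
 * Example (illustrative sketch, not kernel code): inserting a descriptor at
 * a fixed index with an implementation defined instance cookie. The core
 * allocates the real descriptor and copies the relevant fields from the
 * initializer. The MSI descriptor mutex must be held by the caller:
 *
 *	struct msi_desc desc = {
 *		.msi_index		= 0,
 *		.data.icookie.value	= 0x1234,	// made up cookie value
 *	};
 *
 *	ret = msi_insert_msi_desc(dev, &desc);
 */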

void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
				     unsigned int first, unsigned int last);

/**
 * msi_free_msi_descs_range - Free a range of MSI descriptors of a device
 *			      in the default irqdomain
 *
 * @dev:	Device for which to free the descriptors
 * @first:	Index to start freeing from (inclusive)
 * @last:	Last index to be freed (inclusive)
 */
static inline void msi_free_msi_descs_range(struct device *dev, unsigned int first,
					    unsigned int last)
{
	msi_domain_free_msi_descs_range(dev, MSI_DEFAULT_DOMAIN, first, last);
}

/**
 * msi_free_msi_descs - Free all MSI descriptors of a device in the default irqdomain
 * @dev:	Device to free the descriptors
 */
static inline void msi_free_msi_descs(struct device *dev)
{
	msi_free_msi_descs_range(dev, 0, MSI_MAX_INDEX);
}
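
/*
 * Example (illustrative sketch, not kernel code): tearing down all
 * descriptors of a device, typically with the descriptor mutex held:
 *
 *	msi_lock_descs(dev);
 *	msi_free_msi_descs(dev);
 *	msi_unlock_descs(dev);
 */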

/*
 * The arch hooks to set up MSI irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks can only be enabled by the architecture.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */

/*
 * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs
 * entries of MSI IRQs.
 */
#if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS)
#ifdef CONFIG_SYSFS
int msi_device_populate_sysfs(struct device *dev);
void msi_device_destroy_sysfs(struct device *dev);
#else /* CONFIG_SYSFS */
static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
static inline void msi_device_destroy_sysfs(struct device *dev) { }
#endif /* !CONFIG_SYSFS */
#endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */

/*
 * The restore hook is still available even for fully irq domain based
 * setups. Courtesy to XEN/X86.
 */
bool arch_restore_msi_irqs(struct pci_dev *dev);

#ifdef CONFIG_GENERIC_MSI_IRQ

#include <linux/irqhandler.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct irq_fwspec;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @prepare_desc:	Optional function to prepare the allocated MSI descriptor
 *			in the domain
 * @set_desc:		Set the msi descriptor for an interrupt
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 * @msi_post_free:	Optional function which is invoked after freeing
 *			all interrupts.
 * @msi_translate:	Optional translate callback to support the odd wire to
 *			MSI bridges, e.g. MBIGEN
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
 * irqdomain.
 *
 * @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
 * msi_domain_alloc/free_irqs*() variants.
 *
 * @domain_alloc_irqs and @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * exists primarily to wrap XEN's separate MSI universe, which cannot be
 * expressed with the regular irq domain concepts. It allows
 * msi_domain_alloc/free_irqs() to be used universally without special
 * casing XEN all over the place.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
					struct msi_desc *desc);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					     struct device *dev);
	void		(*msi_post_free)(struct irq_domain *domain,
					 struct device *dev);
	int		(*msi_translate)(struct irq_domain *domain, struct irq_fwspec *fwspec,
					 irq_hw_number_t *hwirq, unsigned int *type);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @bus_token:		The domain bus token
 * @hwsize:		The hardware table size or the software index limit.
 *			If 0 then the size is considered unlimited and
 *			gets initialized to the maximum software index limit
 *			by the domain creation code.
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32				flags;
	enum irq_domain_bus_token	bus_token;
	unsigned int			hwsize;
	struct msi_domain_ops		*ops;
	struct irq_chip			*chip;
	void				*chip_data;
	irq_flow_handler_t		handler;
	void				*handler_data;
	const char			*handler_name;
	void				*data;
};

/**
 * struct msi_domain_template - Template for MSI device domains
 * @name:	Storage for the resulting name. Filled in by the core.
 * @chip:	Interrupt chip for this domain
 * @ops:	MSI domain ops
 * @info:	MSI domain info data
 */
struct msi_domain_template {
	char			name[48];
	struct irq_chip		chip;
	struct msi_domain_ops	ops;
	struct msi_domain_info	info;
};
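
/*
 * Example (illustrative sketch, not kernel code, names are made up): a
 * driver which provides a per device MSI domain typically fills in a
 * constant template and lets the MSI core instantiate the domain on top of
 * the MSI parent domain of the device:
 *
 *	static const struct msi_domain_template my_msi_template = {
 *		.chip = {
 *			.name			= "MYDEV-MSI",
 *			.irq_mask		= irq_chip_mask_parent,
 *			.irq_unmask		= irq_chip_unmask_parent,
 *			.irq_write_msi_msg	= my_write_msi_msg,
 *		},
 *		.info = {
 *			.flags	= MSI_FLAG_USE_DEF_DOM_OPS |
 *				  MSI_FLAG_USE_DEF_CHIP_OPS,
 *			.bus_token = DOMAIN_BUS_DEVICE_MSI,
 *		},
 *	};
 *
 *	if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
 *					  &my_msi_template, MY_NR_VECTORS,
 *					  NULL, NULL))
 *		return -ENODEV;
 */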

/*
 * Flags for msi_domain_info
 *
 * Bit 0-15:	Generic MSI functionality which is not subject to restriction
 *		by parent domains
 *
 * Bit 16-31:	Functionality which depends on the underlying parent domain and
 *		can be masked out by msi_parent_ops::init_dev_msi_info() when
 *		a device MSI domain is initialized.
 */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 2),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 3),
	/* Populate sysfs on alloc() and destroy it on free() */
	MSI_FLAG_DEV_SYSFS		= (1 << 4),
	/* Allocate simple MSI descriptors */
	MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS	= (1 << 5),
	/* Free MSI descriptors */
	MSI_FLAG_FREE_MSI_DESCS		= (1 << 6),
	/* Use dev->fwnode for MSI device domain creation */
	MSI_FLAG_USE_DEV_FWNODE		= (1 << 7),
	/* Set parent->dev into domain->pm_dev on device domain creation */
	MSI_FLAG_PARENT_PM_DEV		= (1 << 8),
	/* Support for parent mask/unmask */
	MSI_FLAG_PCI_MSI_MASK_PARENT	= (1 << 9),

	/* Mask for the generic functionality */
	MSI_GENERIC_FLAGS_MASK		= GENMASK(15, 0),

	/* Mask for the domain specific functionality */
	MSI_DOMAIN_FLAGS_MASK		= GENMASK(31, 16),

	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 16),
	/* Support PCI MSI-X interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 17),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 18),
	/* MSI-X entries must be contiguous */
	MSI_FLAG_MSIX_CONTIGUOUS	= (1 << 19),
	/* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
	MSI_FLAG_PCI_MSIX_ALLOC_DYN	= (1 << 20),
	/* PCI MSIs cannot be steered separately to CPU cores */
	MSI_FLAG_NO_AFFINITY		= (1 << 21),
};

/**
 * struct msi_parent_ops - MSI parent domain callbacks and configuration info
 *
 * @supported_flags:	Required: The supported MSI flags of the parent domain
 * @required_flags:	Optional: The required MSI flags of the parent MSI domain
 * @bus_select_token:	Optional: The bus token of the real parent domain for
 *			irq_domain::select()
 * @bus_select_mask:	Optional: A mask of supported BUS_DOMAINs for
 *			irq_domain::select()
 * @prefix:		Optional: Prefix for the domain and chip name
 * @init_dev_msi_info:	Required: Callback for MSI parent domains to setup parent
 *			domain specific domain flags, domain ops and interrupt chip
 *			callbacks when a per device domain is created.
 */
struct msi_parent_ops {
	u32		supported_flags;
	u32		required_flags;
	u32		bus_select_token;
	u32		bus_select_mask;
	const char	*prefix;
	bool		(*init_dev_msi_info)(struct device *dev, struct irq_domain *domain,
					     struct irq_domain *msi_parent_domain,
					     struct msi_domain_info *msi_child_info);
};

bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				  struct irq_domain *msi_parent_domain,
				  struct msi_domain_info *msi_child_info);

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
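
/*
 * Example (illustrative sketch, not kernel code): creation of a regular
 * (non per device) MSI irq domain stacked on a parent interrupt domain,
 * e.g. in an interrupt controller driver. my_msi_chip and my_msi_ops are
 * made up names:
 *
 *	static struct msi_domain_info my_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.ops	= &my_msi_ops,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	msi_domain = msi_create_irq_domain(fwnode, &my_msi_domain_info, parent);
 *	if (!msi_domain)
 *		return -ENOMEM;
 */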

bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
				  const struct msi_domain_template *template,
				  unsigned int hwsize, void *domain_data,
				  void *chip_data);
void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);

bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
				 enum irq_domain_bus_token bus_token);

int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last);
int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last);
int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);

struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
				       const struct irq_affinity_desc *affdesc,
				       union msi_instance_cookie *cookie);
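
/*
 * Example (illustrative sketch, not kernel code): dynamic allocation of a
 * single interrupt at the next free index of the default domain, handing an
 * implementation defined instance cookie to the domain:
 *
 *	union msi_instance_cookie icookie = { .value = my_cookie };	// made up
 *	struct msi_map map;
 *
 *	map = msi_domain_alloc_irq_at(dev, MSI_DEFAULT_DOMAIN, MSI_ANY_INDEX,
 *				      NULL, &icookie);
 *	if (map.index < 0)
 *		return map.index;
 *	// map.virq is the allocated Linux interrupt number
 */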

void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last);
void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last);
void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);

struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

/* Per device platform MSI */
int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
					    irq_write_msi_msg_t write_msi_msg);
void platform_device_msi_free_irqs_all(struct device *dev);
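
/*
 * Example (illustrative sketch, not kernel code): a platform device driver
 * allocating its MSI interrupts with a callback which programs the message
 * into device specific registers. my_write_msg() is a made up name:
 *
 *	static void my_write_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		// program msg->address_lo/hi and msg->data into the device
 *	}
 *
 *	ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, nvec,
 *						      my_write_msg);
 *	...
 *	platform_device_msi_free_irqs_all(&pdev->dev);
 */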

bool msi_device_has_isolated_msi(struct device *dev);

static inline int msi_domain_alloc_irqs(struct device *dev, unsigned int domid, int nirqs)
{
	return msi_domain_alloc_irqs_range(dev, domid, 0, nirqs - 1);
}

#else /* CONFIG_GENERIC_MSI_IRQ */
static inline bool msi_device_has_isolated_msi(struct device *dev)
{
	/*
	 * Arguably if the platform does not enable MSI support then it has
	 * "isolated MSI", as an interrupt controller that cannot receive MSIs
	 * is inherently isolated by our definition. The default definition for
	 * arch_is_isolated_msi() is conservative and returns false anyhow.
	 */
	return arch_is_isolated_msi();
}
#endif /* CONFIG_GENERIC_MSI_IRQ */

/* PCI specific interfaces */
#ifdef CONFIG_PCI_MSI
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif /* !CONFIG_PCI_MSI */

#endif /* LINUX_MSI_H */