xref: /linux/include/linux/msi.h (revision 44ed0f35df343d00b8d38006854f96e333104a66)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef LINUX_MSI_H
3 #define LINUX_MSI_H
4 
5 /*
6  * This header file contains MSI data structures and functions which are
7  * only relevant for:
8  *	- Interrupt core code
9  *	- PCI/MSI core code
10  *	- MSI interrupt domain implementations
11  *	- IOMMU, low level VFIO, NTB and other justified exceptions
12  *	  dealing with low level MSI details.
13  *
14  * Regular device drivers have no business with any of these functions and
15  * especially storing MSI descriptor pointers in random code is considered
16  * abuse.
17  *
18  * Device driver relevant functions are available in <linux/msi_api.h>
19  */
20 
21 #include <linux/irqdomain_defs.h>
22 #include <linux/cpumask_types.h>
23 #include <linux/msi_api.h>
24 #include <linux/irq.h>
25 
26 #include <asm/msi.h>
27 
28 /* Dummy shadow structures if an architecture does not define them */
29 #ifndef arch_msi_msg_addr_lo
30 typedef struct arch_msi_msg_addr_lo {
31 	u32	address_lo;
32 } __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
33 #endif
34 
35 #ifndef arch_msi_msg_addr_hi
36 typedef struct arch_msi_msg_addr_hi {
37 	u32	address_hi;
38 } __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
39 #endif
40 
41 #ifndef arch_msi_msg_data
42 typedef struct arch_msi_msg_data {
43 	u32	data;
44 } __attribute__ ((packed)) arch_msi_msg_data_t;
45 #endif
46 
47 #ifndef arch_is_isolated_msi
48 #define arch_is_isolated_msi() false
49 #endif
50 
51 /**
52  * struct msi_msg - Representation of an MSI message
53  * @address_lo:		Low 32 bits of msi message address
54  * @arch_addr_lo:	Architecture specific shadow of @address_lo
55  * @address_hi:		High 32 bits of msi message address
56  *			(only used when device supports it)
57  * @arch_addr_hi:	Architecture specific shadow of @address_hi
58  * @data:		MSI message data (usually 16 bits)
59  * @arch_data:		Architecture specific shadow of @data
60  */
61 struct msi_msg {
62 	union {
63 		u32			address_lo;
64 		arch_msi_msg_addr_lo_t	arch_addr_lo;
65 	};
66 	union {
67 		u32			address_hi;
68 		arch_msi_msg_addr_hi_t	arch_addr_hi;
69 	};
70 	union {
71 		u32			data;
72 		arch_msi_msg_data_t	arch_data;
73 	};
74 };
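
/*
 * Illustrative sketch (not part of this header): how an irq_chip's
 * .irq_compose_msi_msg() callback might fill in a struct msi_msg. The
 * doorbell address and the data encoding are made up for the example;
 * real interrupt controllers define their own layout.
 *
 *	static void example_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 *	{
 *		u64 doorbell = 0xfee00000;		// hypothetical doorbell address
 *
 *		msg->address_hi = upper_32_bits(doorbell);
 *		msg->address_lo = lower_32_bits(doorbell);
 *		msg->data       = (u32)irqd_to_hwirq(d);	// value the device writes
 *	}
 */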
75 
76 /* Helper functions */
77 struct msi_desc;
78 struct pci_dev;
79 struct device_attribute;
80 struct irq_domain;
81 struct irq_affinity_desc;
82 
83 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
84 #ifdef CONFIG_GENERIC_MSI_IRQ
85 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
86 #else
87 static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { }
88 #endif
89 
90 typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
91 				    struct msi_msg *msg);
92 
93 /**
94  * struct pci_msi_desc - PCI/MSI specific MSI descriptor data
95  *
96  * @msi_mask:	[PCI MSI]   MSI cached mask bits
97  * @msix_ctrl:	[PCI MSI-X] MSI-X cached per vector control bits
98  * @is_msix:	[PCI MSI/X] True if MSI-X
99  * @multiple:	[PCI MSI/X] log2 num of messages allocated
100  * @multi_cap:	[PCI MSI/X] log2 num of messages supported
101  * @can_mask:	[PCI MSI/X] Masking supported?
102  * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI-X] True for a virtual vector beyond the hardware MSI-X table size
103  * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
104  * @mask_pos:	[PCI MSI]   Mask register position
105  * @mask_base:	[PCI MSI-X] Mask register base address
106  */
107 struct pci_msi_desc {
108 	union {
109 		u32 msi_mask;
110 		u32 msix_ctrl;
111 	};
112 	struct {
113 		u8	is_msix		: 1;
114 		u8	multiple	: 3;
115 		u8	multi_cap	: 3;
116 		u8	can_mask	: 1;
117 		u8	is_64		: 1;
118 		u8	is_virtual	: 1;
119 		unsigned default_irq;
120 	} msi_attrib;
121 	union {
122 		u8	mask_pos;
123 		void __iomem *mask_base;
124 	};
125 };
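
/*
 * Note on the log2 fields above: for multi-message MSI the vector count is
 * stored as a power of two, i.e. a device which allocated four vectors has
 * msi_attrib.multiple == 2. A short decoding sketch (illustrative only):
 *
 *	unsigned int nvec_allocated = 1U << desc->pci.msi_attrib.multiple;
 *	unsigned int nvec_supported = 1U << desc->pci.msi_attrib.multi_cap;
 */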
126 
127 /**
128  * union msi_domain_cookie - Opaque MSI domain specific data
129  * @value:	u64 value store
130  * @ptr:	Pointer to domain specific data
131  * @iobase:	Domain specific IOmem pointer
132  *
133  * The content of this data is implementation defined and used by the MSI
134  * domain to store domain specific information which is required for
135  * interrupt chip callbacks.
136  */
137 union msi_domain_cookie {
138 	u64	value;
139 	void	*ptr;
140 	void	__iomem *iobase;
141 };
142 
143 /**
144  * struct msi_desc_data - Generic MSI descriptor data
145  * @dcookie:	Cookie for MSI domain specific data which is required
146  *		for irq_chip callbacks
147  * @icookie:	Cookie for the MSI interrupt instance provided by
148  *		the usage site to the allocation function
149  *
150  * The content of this data is implementation defined, e.g. PCI/IMS
151  * implementations define the meaning of the data. The MSI core ignores
152  * this data completely.
153  */
154 struct msi_desc_data {
155 	union msi_domain_cookie		dcookie;
156 	union msi_instance_cookie	icookie;
157 };
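
/*
 * Illustrative sketch (not part of this header): how the two cookies are
 * typically used. The allocation site hands an instance cookie to
 * msi_domain_alloc_irq_at(); the MSI domain can stash its own data in the
 * domain cookie and both end up in msi_desc::data for the irq_chip
 * callbacks. "dev", "domid" and the cookie value are assumptions for the
 * example.
 *
 *	union msi_instance_cookie icookie = { .value = 0x1234 };
 *	struct msi_map map;
 *
 *	map = msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie);
 *	if (map.index < 0)
 *		return map.index;
 *
 *	// Later, e.g. in an irq_chip callback:
 *	//	struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
 *	//	u64 value = desc->data.icookie.value;
 */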
158 
159 #define MSI_MAX_INDEX		((unsigned int)USHRT_MAX)
160 
161 /**
162  * struct msi_desc - Descriptor structure for MSI based interrupts
163  * @irq:	The base interrupt number
164  * @nvec_used:	The number of vectors used
165  * @dev:	Pointer to the device which uses this descriptor
166  * @msg:	The last set MSI message cached for reuse
167  * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
168  * @iommu_msi_iova: Optional shifted IOVA from the IOMMU to override the msi_addr.
169  *                  Only used if iommu_msi_shift != 0
170  * @iommu_msi_shift: Indicates how many bits of the original address should be
171  *                   preserved when using iommu_msi_iova.
172  * @sysfs_attr:	Pointer to sysfs device attribute
173  *
174  * @write_msi_msg:	Callback that may be called when the MSI message
175  *			address or data changes
176  * @write_msi_msg_data:	Data parameter for the callback.
177  *
178  * @msi_index:	Index of the msi descriptor
179  * @pci:	PCI specific msi descriptor data
180  * @data:	Generic MSI descriptor data
181  */
182 struct msi_desc {
183 	/* Shared device/bus type independent data */
184 	unsigned int			irq;
185 	unsigned int			nvec_used;
186 	struct device			*dev;
187 	struct msi_msg			msg;
188 	struct irq_affinity_desc	*affinity;
189 #ifdef CONFIG_IRQ_MSI_IOMMU
190 	u64				iommu_msi_iova : 58;
191 	u64				iommu_msi_shift : 6;
192 #endif
193 #ifdef CONFIG_SYSFS
194 	struct device_attribute		*sysfs_attrs;
195 #endif
196 
197 	void (*write_msi_msg)(struct msi_desc *entry, void *data);
198 	void *write_msi_msg_data;
199 
200 	u16				msi_index;
201 	union {
202 		struct pci_msi_desc	pci;
203 		struct msi_desc_data	data;
204 	};
205 };
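
/*
 * Illustrative sketch (not part of this header): @msg caches the last
 * written MSI message, which can be retrieved without touching the
 * hardware. "irq" is assumed to be a Linux interrupt number backed by an
 * MSI descriptor.
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(irq, &msg);
 *	pr_debug("MSI addr %08x%08x data %08x\n",
 *		 msg.address_hi, msg.address_lo, msg.data);
 */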
206 
207 /*
208  * Filter values for the MSI descriptor iterators and accessor functions.
209  */
210 enum msi_desc_filter {
211 	/* All descriptors */
212 	MSI_DESC_ALL,
213 	/* Descriptors which have no interrupt associated */
214 	MSI_DESC_NOTASSOCIATED,
215 	/* Descriptors which have an interrupt associated */
216 	MSI_DESC_ASSOCIATED,
217 };
218 
219 
220 /**
221  * struct msi_dev_domain - The internals of MSI domain info per device
222  * @store:		Xarray for storing MSI descriptor pointers
223  * @domain:		Pointer to a per device interrupt domain
224  */
225 struct msi_dev_domain {
226 	struct xarray		store;
227 	struct irq_domain	*domain;
228 };
229 
230 int msi_setup_device_data(struct device *dev);
231 
232 void __msi_lock_descs(struct device *dev);
233 void __msi_unlock_descs(struct device *dev);
234 
235 DEFINE_LOCK_GUARD_1(msi_descs_lock, struct device, __msi_lock_descs(_T->lock),
236 		    __msi_unlock_descs(_T->lock));
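
/*
 * Illustrative sketch (not part of this header): the guard keeps the MSI
 * descriptor storage of a device locked for the enclosing scope, which is
 * required around descriptor lookup, iteration and insertion/removal.
 * "example_has_unassociated" is a hypothetical helper.
 *
 *	static bool example_has_unassociated(struct device *dev)
 *	{
 *		guard(msi_descs_lock)(dev);
 *		return msi_first_desc(dev, MSI_DESC_NOTASSOCIATED) != NULL;
 *	}
 */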
237 
238 struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
239 				       enum msi_desc_filter filter);
240 
241 /**
242  * msi_first_desc - Get the first MSI descriptor of the default irqdomain
243  * @dev:	Device to operate on
244  * @filter:	Descriptor state filter
245  *
246  * Must be called with the MSI descriptor mutex held, i.e. the msi_descs_lock
247  * guard (see __msi_lock_descs()) must be acquired before the call.
248  *
249  * Return: Pointer to the first MSI descriptor matching the search
250  *	   criteria, NULL if none found.
251  */
252 static inline struct msi_desc *msi_first_desc(struct device *dev,
253 					      enum msi_desc_filter filter)
254 {
255 	return msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, filter);
256 }
257 
258 struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
259 			       enum msi_desc_filter filter);
260 
261 /**
262  * msi_domain_for_each_desc - Iterate the MSI descriptors in a specific domain
263  *
264  * @desc:	struct msi_desc pointer used as iterator
265  * @dev:	struct device pointer - device to iterate
266  * @domid:	The id of the interrupt domain which should be walked.
267  * @filter:	Filter for descriptor selection
268  *
269  * Notes:
270  *  - The loop must be protected by the MSI descriptor mutex, e.g. via a
271  *    guard(msi_descs_lock) scope or an __msi_lock_descs()/__msi_unlock_descs() pair.
272  *  - It is safe to remove a retrieved MSI descriptor in the loop.
273  */
274 #define msi_domain_for_each_desc(desc, dev, domid, filter)			\
275 	for ((desc) = msi_domain_first_desc((dev), (domid), (filter)); (desc);	\
276 	     (desc) = msi_next_desc((dev), (domid), (filter)))
277 
278 /**
279  * msi_for_each_desc - Iterate the MSI descriptors in the default irqdomain
280  *
281  * @desc:	struct msi_desc pointer used as iterator
282  * @dev:	struct device pointer - device to iterate
283  * @filter:	Filter for descriptor selection
284  *
285  * Notes:
286  *  - The loop must be protected by the MSI descriptor mutex, e.g. via a
287  *    guard(msi_descs_lock) scope or an __msi_lock_descs()/__msi_unlock_descs() pair.
288  *  - It is safe to remove a retrieved MSI descriptor in the loop.
289  */
290 #define msi_for_each_desc(desc, dev, filter)					\
291 	msi_domain_for_each_desc((desc), (dev), MSI_DEFAULT_DOMAIN, (filter))
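
/*
 * Illustrative sketch (not part of this header): walking every descriptor
 * of the default irqdomain which already has a Linux interrupt associated.
 * The descriptor lock must be held across the walk; "example_report" is a
 * hypothetical helper.
 *
 *	static void example_report(struct device *dev)
 *	{
 *		struct msi_desc *desc;
 *
 *		guard(msi_descs_lock)(dev);
 *		msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *			pr_debug("msi index %u uses irq %u\n", desc->msi_index, desc->irq);
 *	}
 */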
292 
293 #define msi_desc_to_dev(desc)		((desc)->dev)
294 
295 static inline void msi_desc_set_iommu_msi_iova(struct msi_desc *desc, u64 msi_iova,
296 					       unsigned int msi_shift)
297 {
298 #ifdef CONFIG_IRQ_MSI_IOMMU
299 	desc->iommu_msi_iova = msi_iova >> msi_shift;
300 	desc->iommu_msi_shift = msi_shift;
301 #endif
302 }
303 
304 /**
305  * msi_msg_set_addr() - Set MSI address in an MSI message
306  *
307  * @desc:	MSI descriptor that may carry an IOVA base address for MSI via @iommu_msi_iova/shift
308  * @msg:	Target MSI message to set its address_hi and address_lo
309  * @msi_addr:	Physical MSI address to program into the message
310  *
311  * Notes:
312  *  - If @iommu_msi_shift is set, the IOVA base address in @desc overrides the upper bits of @msi_addr
313  *  - Otherwise @msi_addr is written into @msg unmodified
314  */
315 static inline void msi_msg_set_addr(struct msi_desc *desc, struct msi_msg *msg,
316 				    phys_addr_t msi_addr)
317 {
318 #ifdef CONFIG_IRQ_MSI_IOMMU
319 	if (desc->iommu_msi_shift) {
320 		u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift;
321 
322 		msg->address_hi = upper_32_bits(msi_iova);
323 		msg->address_lo = lower_32_bits(msi_iova) |
324 				  (msi_addr & ((1 << desc->iommu_msi_shift) - 1));
325 		return;
326 	}
327 #endif
328 	msg->address_hi = upper_32_bits(msi_addr);
329 	msg->address_lo = lower_32_bits(msi_addr);
330 }
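
/*
 * Worked example (illustrative numbers): with CONFIG_IRQ_MSI_IOMMU enabled,
 * iommu_msi_shift == 12 and iommu_msi_iova holding 0x8f000000 >> 12, a
 * physical @msi_addr of 0xfee00438 results in
 *
 *	address_hi = 0x00000000
 *	address_lo = 0x8f000438
 *
 * i.e. the IOVA page replaces the upper bits while the low 12 bits of the
 * physical MSI address (the offset within the page) are preserved.
 */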
331 
332 int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
333 			       struct msi_desc *init_desc);
334 /**
335  * msi_insert_msi_desc - Allocate and initialize a MSI descriptor in the
336  *			 default irqdomain and insert it at @init_desc->msi_index
337  * @dev:	Pointer to the device for which the descriptor is allocated
338  * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
339  *
340  * Return: 0 on success or an appropriate failure code.
341  */
342 static inline int msi_insert_msi_desc(struct device *dev, struct msi_desc *init_desc)
343 {
344 	return msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, init_desc);
345 }
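
/*
 * Illustrative sketch (not part of this header): inserting a single
 * descriptor at index 0 into the default irqdomain. Bus specific code
 * would additionally fill in the relevant union member (e.g. @pci).
 * "ret" and "dev" are assumptions for the example.
 *
 *	struct msi_desc desc = {
 *		.msi_index	= 0,
 *		.nvec_used	= 1,
 *	};
 *
 *	ret = msi_insert_msi_desc(dev, &desc);
 *	if (ret)
 *		return ret;
 */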
346 
347 void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
348 				     unsigned int first, unsigned int last);
349 
350 /**
351  * msi_free_msi_descs_range - Free a range of MSI descriptors of a device
352  *			      in the default irqdomain
353  *
354  * @dev:	Device for which to free the descriptors
355  * @first:	Index to start freeing from (inclusive)
356  * @last:	Last index to be freed (inclusive)
357  */
358 static inline void msi_free_msi_descs_range(struct device *dev, unsigned int first,
359 					    unsigned int last)
360 {
361 	msi_domain_free_msi_descs_range(dev, MSI_DEFAULT_DOMAIN, first, last);
362 }
363 
364 /**
365  * msi_free_msi_descs - Free all MSI descriptors of a device in the default irqdomain
366  * @dev:	Device to free the descriptors
367  */
368 static inline void msi_free_msi_descs(struct device *dev)
369 {
370 	msi_free_msi_descs_range(dev, 0, MSI_MAX_INDEX);
371 }
372 
373 /*
374  * The arch hooks to set up MSI irqs. Default functions are implemented
375  * as weak symbols so that they /can/ be overridden by architecture specific
376  * code if needed. These hooks can only be enabled by the architecture.
377  *
378  * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
379  * stubs with warnings.
380  */
381 #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
382 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
383 void arch_teardown_msi_irq(unsigned int irq);
384 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
385 void arch_teardown_msi_irqs(struct pci_dev *dev);
386 #endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
387 
388 /*
389  * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs
390  * entries of MSI IRQs.
391  */
392 #if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS)
393 #ifdef CONFIG_SYSFS
394 int msi_device_populate_sysfs(struct device *dev);
395 void msi_device_destroy_sysfs(struct device *dev);
396 #else /* CONFIG_SYSFS */
397 static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
398 static inline void msi_device_destroy_sysfs(struct device *dev) { }
399 #endif /* !CONFIG_SYSFS */
400 #endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */
401 
402 /*
403  * The restore hook is still available even for fully irq domain based
404  * setups. Courtesy to XEN/X86.
405  */
406 bool arch_restore_msi_irqs(struct pci_dev *dev);
407 
408 #ifdef CONFIG_GENERIC_MSI_IRQ
409 
410 #include <linux/irqhandler.h>
411 
412 struct irq_domain;
413 struct irq_domain_ops;
414 struct irq_chip;
415 struct irq_fwspec;
416 struct device_node;
417 struct fwnode_handle;
418 struct msi_domain_info;
419 
420 /**
421  * struct msi_domain_ops - MSI interrupt domain callbacks
422  * @get_hwirq:		Retrieve the resulting hw irq number
423  * @msi_init:		Domain specific init function for MSI interrupts
424  * @msi_free:		Domain specific function to free MSI interrupts
425  * @msi_prepare:	Prepare the allocation of the interrupts in the domain
426  * @msi_teardown:	Reverse the effects of @msi_prepare
427  * @prepare_desc:	Optional function to prepare the allocated MSI descriptor
428  *			in the domain
429  * @set_desc:		Set the msi descriptor for an interrupt
430  * @domain_alloc_irqs:	Optional function to override the default allocation
431  *			function.
432  * @domain_free_irqs:	Optional function to override the default free
433  *			function.
434  * @msi_post_free:	Optional function which is invoked after freeing
435  *			all interrupts.
436  * @msi_translate:	Optional translate callback to support the odd wire to
437  *			MSI bridges, e.g. MBIGEN
438  *
439  * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
440  * irqdomain.
441  *
442  * @msi_prepare, @msi_teardown, @prepare_desc and @set_desc are
443  * callbacks used by the msi_domain_alloc/free_irqs*()
444  * variants.
445  *
446  * @domain_alloc_irqs, @domain_free_irqs can be used to override the
447  * default allocation/free functions (__msi_domain_alloc/free_irqs). This
448  * is initially for a wrapper around XENs seperate MSI universe which can't
449  * be wrapped into the regular irq domains concepts by mere mortals.  This
450  * allows to universally use msi_domain_alloc/free_irqs without having to
451  * special case XEN all over the place.
452  */
453 struct msi_domain_ops {
454 	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
455 				     msi_alloc_info_t *arg);
456 	int		(*msi_init)(struct irq_domain *domain,
457 				    struct msi_domain_info *info,
458 				    unsigned int virq, irq_hw_number_t hwirq,
459 				    msi_alloc_info_t *arg);
460 	void		(*msi_free)(struct irq_domain *domain,
461 				    struct msi_domain_info *info,
462 				    unsigned int virq);
463 	int		(*msi_prepare)(struct irq_domain *domain,
464 				       struct device *dev, int nvec,
465 				       msi_alloc_info_t *arg);
466 	void		(*msi_teardown)(struct irq_domain *domain,
467 					msi_alloc_info_t *arg);
468 	void		(*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
469 					struct msi_desc *desc);
470 	void		(*set_desc)(msi_alloc_info_t *arg,
471 				    struct msi_desc *desc);
472 	int		(*domain_alloc_irqs)(struct irq_domain *domain,
473 					     struct device *dev, int nvec);
474 	void		(*domain_free_irqs)(struct irq_domain *domain,
475 					    struct device *dev);
476 	void		(*msi_post_free)(struct irq_domain *domain,
477 					 struct device *dev);
478 	int		(*msi_translate)(struct irq_domain *domain, struct irq_fwspec *fwspec,
479 					 irq_hw_number_t *hwirq, unsigned int *type);
480 };
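
/*
 * Illustrative sketch (not part of this header): with
 * MSI_FLAG_USE_DEF_DOM_OPS most callbacks can be left NULL and are filled
 * in with the default implementations, so an implementation typically only
 * overrides what it needs, e.g. a prepare_desc() hook which programs an
 * implementation defined cookie into the descriptor. Names and the cookie
 * value are assumptions for the example.
 *
 *	static void example_prepare_desc(struct irq_domain *domain,
 *					 msi_alloc_info_t *arg,
 *					 struct msi_desc *desc)
 *	{
 *		desc->data.dcookie.value = 0xabcd;	// made up domain data
 *	}
 *
 *	static struct msi_domain_ops example_msi_ops = {
 *		.prepare_desc	= example_prepare_desc,
 *	};
 */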
481 
482 /**
483  * struct msi_domain_info - MSI interrupt domain data
484  * @flags:		Flags to describe features and capabilities
485  * @bus_token:		The domain bus token
486  * @hwsize:		The hardware table size or the software index limit.
487  *			If 0 then the size is considered unlimited and
488  *			gets initialized to the maximum software index limit
489  *			by the domain creation code.
490  * @ops:		The callback data structure
491  * @chip:		Optional: associated interrupt chip
492  * @chip_data:		Optional: associated interrupt chip data
493  * @handler:		Optional: associated interrupt flow handler
494  * @handler_data:	Optional: associated interrupt flow handler data
495  * @handler_name:	Optional: associated interrupt flow handler name
496  * @alloc_data:		Optional: associated interrupt allocation data
497  * @data:		Optional: domain specific data
498  */
499 struct msi_domain_info {
500 	u32				flags;
501 	enum irq_domain_bus_token	bus_token;
502 	unsigned int			hwsize;
503 	struct msi_domain_ops		*ops;
504 	struct irq_chip			*chip;
505 	void				*chip_data;
506 	irq_flow_handler_t		handler;
507 	void				*handler_data;
508 	const char			*handler_name;
509 	msi_alloc_info_t		*alloc_data;
510 	void				*data;
511 };
512 
513 /**
514  * struct msi_domain_template - Template for MSI device domains
515  * @name:	Storage for the resulting name. Filled in by the core.
516  * @chip:	Interrupt chip for this domain
517  * @ops:	MSI domain ops
518  * @info:	MSI domain info data
519  * @alloc_info:	MSI domain allocation data (architecture specific)
520  */
521 struct msi_domain_template {
522 	char			name[48];
523 	struct irq_chip		chip;
524 	struct msi_domain_ops	ops;
525 	struct msi_domain_info	info;
526 	msi_alloc_info_t	alloc_info;
527 };
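
/*
 * Illustrative sketch (not part of this header): a device MSI domain is
 * usually described by a static template and instantiated per device with
 * msi_create_device_irq_domain(). The names, hwsize and chip callbacks
 * below are assumptions for the example.
 *
 *	static const struct msi_domain_template example_template = {
 *		.chip = {
 *			.name			= "example-msi",
 *			.irq_mask		= example_msi_mask,
 *			.irq_unmask		= example_msi_unmask,
 *			.irq_write_msi_msg	= example_write_msg,
 *		},
 *		.info = {
 *			.flags			= MSI_FLAG_USE_DEF_DOM_OPS |
 *						  MSI_FLAG_USE_DEF_CHIP_OPS,
 *			.bus_token		= DOMAIN_BUS_DEVICE_MSI,
 *		},
 *	};
 *
 *	if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
 *					  &example_template, 64, NULL, NULL))
 *		return -ENODEV;
 */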
528 
529 /*
530  * Flags for msi_domain_info
531  *
532  * Bit 0-15:	Generic MSI functionality which is not subject to restriction
533  *		by parent domains
534  *
535  * Bit 16-31:	Functionality which depends on the underlying parent domain and
536  *		can be masked out by msi_parent_ops::init_dev_msi_info() when
537  *		a device MSI domain is initialized.
538  */
539 enum {
540 	/*
541 	 * Init non implemented ops callbacks with default MSI domain
542 	 * callbacks.
543 	 */
544 	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
545 	/*
546 	 * Init non implemented chip callbacks with default MSI chip
547 	 * callbacks.
548 	 */
549 	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
550 	/* Needs early activate, required for PCI */
551 	MSI_FLAG_ACTIVATE_EARLY		= (1 << 2),
552 	/*
553 	 * Must reactivate when irq is started even when
554 	 * MSI_FLAG_ACTIVATE_EARLY has been set.
555 	 */
556 	MSI_FLAG_MUST_REACTIVATE	= (1 << 3),
557 	/* Populate sysfs on alloc() and destroy it on free() */
558 	MSI_FLAG_DEV_SYSFS		= (1 << 4),
559 	/* Allocate simple MSI descriptors */
560 	MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS	= (1 << 5),
561 	/* Free MSI descriptors */
562 	MSI_FLAG_FREE_MSI_DESCS		= (1 << 6),
563 	/* Use dev->fwnode for MSI device domain creation */
564 	MSI_FLAG_USE_DEV_FWNODE		= (1 << 7),
565 	/* Set parent->dev into domain->pm_dev on device domain creation */
566 	MSI_FLAG_PARENT_PM_DEV		= (1 << 8),
567 	/* Support for parent mask/unmask */
568 	MSI_FLAG_PCI_MSI_MASK_PARENT	= (1 << 9),
569 
570 	/* Mask for the generic functionality */
571 	MSI_GENERIC_FLAGS_MASK		= GENMASK(15, 0),
572 
573 	/* Mask for the domain specific functionality */
574 	MSI_DOMAIN_FLAGS_MASK		= GENMASK(31, 16),
575 
576 	/* Support multiple PCI MSI interrupts */
577 	MSI_FLAG_MULTI_PCI_MSI		= (1 << 16),
578 	/* Support PCI MSIX interrupts */
579 	MSI_FLAG_PCI_MSIX		= (1 << 17),
580 	/* Is level-triggered capable, using two messages */
581 	MSI_FLAG_LEVEL_CAPABLE		= (1 << 18),
582 	/* MSI-X entries must be contiguous */
583 	MSI_FLAG_MSIX_CONTIGUOUS	= (1 << 19),
584 	/* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
585 	MSI_FLAG_PCI_MSIX_ALLOC_DYN	= (1 << 20),
586 	/* PCI MSIs cannot be steered separately to CPU cores */
587 	MSI_FLAG_NO_AFFINITY		= (1 << 21),
588 	/* Inhibit usage of entry masking */
589 	MSI_FLAG_NO_MASK		= (1 << 22),
590 };
591 
592 /*
593  * Flags for msi_parent_ops::chip_flags
594  */
595 enum {
596 	MSI_CHIP_FLAG_SET_EOI		= (1 << 0),
597 	MSI_CHIP_FLAG_SET_ACK		= (1 << 1),
598 };
599 
600 /**
601  * struct msi_parent_ops - MSI parent domain callbacks and configuration info
602  *
603  * @supported_flags:	Required: The supported MSI flags of the parent domain
604  * @required_flags:	Optional: The required MSI flags of the parent MSI domain
605  * @chip_flags:		Optional: Select MSI chip callbacks to update with defaults
606  *			in msi_lib_init_dev_msi_info().
607  * @bus_select_token:	Optional: The bus token of the real parent domain for
608  *			irq_domain::select()
609  * @bus_select_mask:	Optional: A mask of supported BUS_DOMAINs for
610  *			irq_domain::select()
611  * @prefix:		Optional: Prefix for the domain and chip name
612  * @init_dev_msi_info:	Required: Callback for MSI parent domains to setup parent
613  *			domain specific domain flags, domain ops and interrupt chip
614  *			callbacks when a per device domain is created.
615  */
616 struct msi_parent_ops {
617 	u32		supported_flags;
618 	u32		required_flags;
619 	u32		chip_flags;
620 	u32		bus_select_token;
621 	u32		bus_select_mask;
622 	const char	*prefix;
623 	bool		(*init_dev_msi_info)(struct device *dev, struct irq_domain *domain,
624 					     struct irq_domain *msi_parent_domain,
625 					     struct msi_domain_info *msi_child_info);
626 };
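
/*
 * Illustrative sketch (not part of this header): an MSI parent, e.g. an
 * interrupt controller driver, advertises what its child device domains
 * may use. The flag selection and prefix below are assumptions for the
 * example; init_dev_msi_info() commonly points to a library helper, or to
 * msi_parent_init_dev_msi_info() for remapping style parents which forward
 * to their own parent.
 *
 *	static const struct msi_parent_ops example_parent_ops = {
 *		.supported_flags	= MSI_FLAG_USE_DEF_DOM_OPS |
 *					  MSI_FLAG_USE_DEF_CHIP_OPS |
 *					  MSI_FLAG_PCI_MSIX,
 *		.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS,
 *		.bus_select_token	= DOMAIN_BUS_NEXUS,
 *		.bus_select_mask	= BIT(DOMAIN_BUS_PCI_MSI),
 *		.prefix			= "EX-",
 *		.init_dev_msi_info	= msi_parent_init_dev_msi_info,
 *	};
 */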
627 
628 bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
629 				  struct irq_domain *msi_parent_domain,
630 				  struct msi_domain_info *msi_child_info);
631 
632 int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
633 			    bool force);
634 
635 struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
636 					 struct msi_domain_info *info,
637 					 struct irq_domain *parent);
638 
639 struct irq_domain_info;
640 struct irq_domain *msi_create_parent_irq_domain(struct irq_domain_info *info,
641 						const struct msi_parent_ops *msi_parent_ops);
642 
643 bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
644 				  const struct msi_domain_template *template,
645 				  unsigned int hwsize, void *domain_data,
646 				  void *chip_data);
647 void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);
648 
649 bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
650 				 enum irq_domain_bus_token bus_token);
651 
652 int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
653 				       unsigned int first, unsigned int last);
654 int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
655 				unsigned int first, unsigned int last);
656 int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);
657 
658 struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
659 				       const struct irq_affinity_desc *affdesc,
660 				       union msi_instance_cookie *cookie);
661 
662 void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
663 				       unsigned int first, unsigned int last);
664 void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
665 				unsigned int first, unsigned int last);
666 void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
667 void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);
668 
669 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
670 
671 /* Per device platform MSI */
672 int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
673 					    irq_write_msi_msg_t write_msi_msg);
674 void platform_device_msi_free_irqs_all(struct device *dev);
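
/*
 * Illustrative sketch (not part of this header): a platform device with a
 * device specific message store provides an irq_write_msi_msg_t callback
 * and lets the core allocate its vectors. "struct example_hw", the
 * register layout and "pdev" are assumptions for the example.
 *
 *	static void example_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct example_hw *hw = dev_get_drvdata(msi_desc_to_dev(desc));
 *
 *		writel(msg->address_lo, hw->base + 0x10 + desc->msi_index * 0x10);
 *		writel(msg->address_hi, hw->base + 0x14 + desc->msi_index * 0x10);
 *		writel(msg->data,       hw->base + 0x18 + desc->msi_index * 0x10);
 *	}
 *
 *	ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 4,
 *						      example_write_msi_msg);
 */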
675 
676 bool msi_device_has_isolated_msi(struct device *dev);
677 
678 static inline int msi_domain_alloc_irqs(struct device *dev, unsigned int domid, int nirqs)
679 {
680 	return msi_domain_alloc_irqs_range(dev, domid, 0, nirqs - 1);
681 }
682 
683 #else /* CONFIG_GENERIC_MSI_IRQ */
684 static inline bool msi_device_has_isolated_msi(struct device *dev)
685 {
686 	/*
687 	 * Arguably if the platform does not enable MSI support then it has
688 	 * "isolated MSI", as an interrupt controller that cannot receive MSIs
689 	 * is inherently isolated by our definition. The default definition for
690 	 * arch_is_isolated_msi() is conservative and returns false anyhow.
691 	 */
692 	return arch_is_isolated_msi();
693 }
694 #endif /* CONFIG_GENERIC_MSI_IRQ */
695 
696 /* PCI specific interfaces */
697 #ifdef CONFIG_PCI_MSI
698 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
699 void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
700 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
701 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
702 void pci_msi_mask_irq(struct irq_data *data);
703 void pci_msi_unmask_irq(struct irq_data *data);
704 struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
705 					     struct msi_domain_info *info,
706 					     struct irq_domain *parent);
707 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
708 struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
709 #else /* CONFIG_PCI_MSI */
710 static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
711 {
712 	return NULL;
713 }
714 static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { }
715 #endif /* !CONFIG_PCI_MSI */
716 
717 #endif /* LINUX_MSI_H */
718