/* SPDX-License-Identifier: GPL-2.0 */
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#ifndef _LINUX_PCI_P2PDMA_H
#define _LINUX_PCI_P2PDMA_H

#include <linux/pci.h>

struct block_device;
struct scatterlist;

#ifdef CONFIG_PCI_P2PDMA
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
		u64 offset);
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose);
bool pci_has_p2pmem(struct pci_dev *pdev);
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr);
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length);
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma);
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma);
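
/*
 * Illustrative note (not part of the upstream header): pci_p2pdma_enable_store()
 * and pci_p2pdma_enable_show() are helpers for drivers that expose a
 * sysfs/configfs style attribute selecting a P2P provider.  As a rough sketch
 * with hypothetical fields (ns, page, count), a store handler might do:
 *
 *	ret = pci_p2pdma_enable_store(page, &ns->p2p_dev, &ns->use_p2pmem);
 *	if (ret < 0)
 *		return ret;
 *	return count;
 *
 * and the matching show handler would return
 * pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem).
 */
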
#else /* CONFIG_PCI_P2PDMA */
static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
		size_t size, u64 offset)
{
	return -EOPNOTSUPP;
}
static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
	struct device **clients, int num_clients, bool verbose)
{
	return -1;
}
static inline bool pci_has_p2pmem(struct pci_dev *pdev)
{
	return false;
}
static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
						   int num_clients)
{
	return NULL;
}
static inline void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	return NULL;
}
static inline void pci_free_p2pmem(struct pci_dev *pdev, void *addr,
		size_t size)
{
}
static inline pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev,
						    void *addr)
{
	return 0;
}
static inline struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
		unsigned int *nents, u32 length)
{
	return NULL;
}
static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
		struct scatterlist *sgl)
{
}
static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
}
static inline int pci_p2pdma_enable_store(const char *page,
		struct pci_dev **p2p_dev, bool *use_p2pdma)
{
	*use_p2pdma = false;
	return 0;
}
static inline ssize_t pci_p2pdma_enable_show(char *page,
		struct pci_dev *p2p_dev, bool use_p2pdma)
{
	return sprintf(page, "none\n");
}
#endif /* CONFIG_PCI_P2PDMA */


static inline int pci_p2pdma_distance(struct pci_dev *provider,
	struct device *client, bool verbose)
{
	return pci_p2pdma_distance_many(provider, &client, 1, verbose);
}

static inline struct pci_dev *pci_p2pmem_find(struct device *client)
{
	return pci_p2pmem_find_many(&client, 1);
}

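/*
 * Example usage (an illustrative sketch, not taken from this header): a driver
 * wanting peer-to-peer memory might look up a compatible provider for one of
 * its DMA clients and carve a buffer out of it.  The client device, buffer
 * size and error handling below are hypothetical.  Note that pci_p2pmem_find()
 * takes a reference on the returned provider, which the caller drops with
 * pci_dev_put() when done.
 *
 *	struct pci_dev *p2p_dev;
 *	pci_bus_addr_t bus_addr;
 *	void *buf;
 *
 *	p2p_dev = pci_p2pmem_find(&client_pdev->dev);
 *	if (!p2p_dev)
 *		return -ENODEV;
 *
 *	buf = pci_alloc_p2pmem(p2p_dev, SZ_4K);
 *	if (!buf) {
 *		pci_dev_put(p2p_dev);
 *		return -ENOMEM;
 *	}
 *
 *	bus_addr = pci_p2pmem_virt_to_bus(p2p_dev, buf);
 *	... program the peer's DMA engine with bus_addr ...
 *
 *	pci_free_p2pmem(p2p_dev, buf, SZ_4K);
 *	pci_dev_put(p2p_dev);
 */
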
enum pci_p2pdma_map_type {
	/*
	 * PCI_P2PDMA_MAP_UNKNOWN: Used internally as an initial state before
	 * the mapping type has been calculated. Exported routines for the API
	 * will never return this value.
	 */
	PCI_P2PDMA_MAP_UNKNOWN = 0,

	/*
	 * PCI_P2PDMA_MAP_NONE: Not a PCI P2PDMA transfer.
	 */
	PCI_P2PDMA_MAP_NONE,

	/*
	 * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
	 * traverse the host bridge and the host bridge is not in the
	 * allowlist. DMA mapping routines should return an error when
	 * this is returned.
	 */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,

	/*
	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
	 * each other directly through a PCI switch and the transaction will
	 * not traverse the host bridge. Such a mapping should program
	 * the DMA engine with PCI bus addresses.
	 */
	PCI_P2PDMA_MAP_BUS_ADDR,

	/*
	 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
	 * to each other, but the transaction traverses a host bridge on the
	 * allowlist. In this case, a normal mapping either with CPU physical
	 * addresses (in the case of dma-direct) or IOVA addresses (in the
	 * case of IOMMUs) should be used to program the DMA engine.
	 */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
	struct dev_pagemap *pgmap;
	enum pci_p2pdma_map_type map;
	u64 bus_off;
};

/* helper for pci_p2pdma_state(), do not use directly */
void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
		struct device *dev, struct page *page);

/**
 * pci_p2pdma_state - check the P2P transfer state of a page
 * @state:	P2P state structure
 * @dev:	device to transfer to/from
 * @page:	page to map
 *
 * Check if @page is a PCI P2PDMA page, and if so, of what kind.  Returns the
 * map type, and updates @state with all information needed for a P2P transfer.
 */
static inline enum pci_p2pdma_map_type
pci_p2pdma_state(struct pci_p2pdma_map_state *state, struct device *dev,
		struct page *page)
{
	if (IS_ENABLED(CONFIG_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
		if (state->pgmap != page_pgmap(page))
			__pci_p2pdma_update_state(state, dev, page);
		return state->map;
	}
	return PCI_P2PDMA_MAP_NONE;
}
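
/*
 * Illustrative sketch (hypothetical caller code, not part of this header): a
 * DMA mapping path might classify each page with pci_p2pdma_state() and
 * branch on the returned map type.  The state caches the result per
 * dev_pagemap, so consecutive pages from the same provider are cheap to
 * classify.  The iteration, device and error code below are assumptions about
 * such a caller, not a definitive implementation.
 *
 *	struct pci_p2pdma_map_state state = {};
 *	struct page *page;
 *
 *	... for each page to be mapped ...
 *	switch (pci_p2pdma_state(&state, dev, page)) {
 *	case PCI_P2PDMA_MAP_NONE:
 *	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 *		... map normally (dma-direct or through the IOMMU) ...
 *		break;
 *	case PCI_P2PDMA_MAP_BUS_ADDR:
 *		... use the PCI bus address directly, see
 *		    pci_p2pdma_bus_addr_map() below ...
 *		break;
 *	default:
 *		... PCI_P2PDMA_MAP_NOT_SUPPORTED: fail the mapping ...
 *		return -EREMOTEIO;
 *	}
 */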

/**
 * pci_p2pdma_bus_addr_map - Translate a physical address to a bus address
 *			     for a PCI_P2PDMA_MAP_BUS_ADDR transfer.
 * @state:	P2P state structure
 * @paddr:	physical address to map
 *
 * Map a physically contiguous PCI_P2PDMA_MAP_BUS_ADDR transfer.
 */
static inline dma_addr_t
pci_p2pdma_bus_addr_map(struct pci_p2pdma_map_state *state, phys_addr_t paddr)
{
	WARN_ON_ONCE(state->map != PCI_P2PDMA_MAP_BUS_ADDR);
	return paddr + state->bus_off;
}
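
/*
 * Continuing the sketch above (hypothetical caller code): for the
 * PCI_P2PDMA_MAP_BUS_ADDR case the DMA address programmed into the device is
 * simply the page's physical address plus the provider's bus offset:
 *
 *	dma_addr_t dma_addr =
 *		pci_p2pdma_bus_addr_map(&state, page_to_phys(page));
 *
 * No IOMMU mapping is set up for such a transfer; the peers communicate
 * directly through the PCI switch using bus addresses.
 */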

#endif /* _LINUX_PCI_P2PDMA_H */