xref: /linux/arch/mips/cavium-octeon/dma-octeon.c (revision 26fbb4c8c7c3ee9a4c3b4de555a8587b5a19154e)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 * Copyright (C) 2010 Cavium Networks, Inc.
 */
#include <linux/dma-direct.h>
#include <linux/memblock.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>

#include <asm/octeon/octeon.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>

struct octeon_dma_map_ops {
	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
};

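/*
 * Translate between physical addresses and PCI bus addresses for the
 * PCIe BAR1 "hole": physical addresses inside the window
 * [CVMX_PCIE_BAR1_PHYS_BASE, CVMX_PCIE_BAR1_PHYS_BASE +
 * CVMX_PCIE_BAR1_PHYS_SIZE) are exposed to devices at
 * CVMX_PCIE_BAR1_RC_BASE instead, so shift them by the difference of
 * the two bases.  Everything else maps 1:1.
 */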
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
		return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
	else
		return paddr;
}

static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
{
	if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
		return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
	else
		return daddr;
}

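/*
 * Mapping ops for OCTEON_DMA_BAR_TYPE_PCIE (gen1 PCIe root complex).
 * Physical addresses in 0x410000000-0x420000000 are presented to
 * devices at their low alias (0x10000000-0x20000000) by dropping the
 * 0x400000000 offset, then the BAR1 hole translation above is applied.
 * dma_to_phys reverses the two steps in the opposite order.
 */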
static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	daddr = octeon_hole_dma_to_phys(daddr);

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;

	return daddr;
}

static const struct octeon_dma_map_ops octeon_gen1_ops = {
	.phys_to_dma	= octeon_gen1_phys_to_dma,
	.dma_to_phys	= octeon_gen1_dma_to_phys,
};

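/*
 * Mapping ops for OCTEON_DMA_BAR_TYPE_PCIE2 (OCTEON II PCIe): only the
 * BAR1 hole translation is applied; the low-alias fold-down used by
 * the gen1 ops is not needed here, presumably because these chips can
 * reach that memory at its real address.
 */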
static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return octeon_hole_dma_to_phys(daddr);
}

static const struct octeon_dma_map_ops octeon_gen2_ops = {
	.phys_to_dma	= octeon_gen2_phys_to_dma,
	.dma_to_phys	= octeon_gen2_dma_to_phys,
};

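/*
 * Mapping ops for OCTEON_DMA_BAR_TYPE_BIG: the low 3.75 GB of physical
 * memory is visible 1:1 on the bus through BAR1.  Addresses at or
 * above 0xf0000000 (the BAR1 hole and everything beyond it) are
 * reached through BAR2 by adding the OCTEON_BAR2_PCI_ADDRESS offset.
 * The 0x410000000 alias is folded down to 0x10000000 first, as for
 * the PCIe variants.
 */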
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything in the BAR1 hole or above goes via BAR2 */
	if (paddr >= 0xf0000000ull)
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}

static const struct octeon_dma_map_ops octeon_big_ops = {
	.phys_to_dma	= octeon_big_phys_to_dma,
	.dma_to_phys	= octeon_big_dma_to_phys,
};

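/*
 * Mapping ops for OCTEON_DMA_BAR_TYPE_SMALL: only a 128 MB BAR1
 * window starting at octeon_bar1_pci_phys is visible, at bus address
 * 0; every other physical address takes the BAR2 path with the
 * OCTEON_BAR2_PCI_ADDRESS offset added.
 */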
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
					   phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything not in the BAR1 range goes via BAR2 */
	if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
		paddr = paddr - octeon_bar1_pci_phys;
	else
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
					    dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;
	else
		daddr += octeon_bar1_pci_phys;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}

static const struct octeon_dma_map_ops octeon_small_ops = {
	.phys_to_dma	= octeon_small_phys_to_dma,
	.dma_to_phys	= octeon_small_dma_to_phys,
};

static const struct octeon_dma_map_ops *octeon_pci_dma_ops;

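/*
 * Select the address translation ops matching the BAR layout chosen by
 * the PCI/PCIe host setup code (octeon_dma_bar_type).
 */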
void __init octeon_pci_dma_init(void)
{
	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE:
		octeon_pci_dma_ops = &octeon_gen1_ops;
		break;
	case OCTEON_DMA_BAR_TYPE_PCIE2:
		octeon_pci_dma_ops = &octeon_gen2_ops;
		break;
	case OCTEON_DMA_BAR_TYPE_BIG:
		octeon_pci_dma_ops = &octeon_big_ops;
		break;
	case OCTEON_DMA_BAR_TYPE_SMALL:
		octeon_pci_dma_ops = &octeon_small_ops;
		break;
	default:
		BUG();
	}
}
#endif /* CONFIG_PCI */

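/*
 * Arch overrides of the dma-direct phys_to_dma()/dma_to_phys() helpers
 * (ARCH_HAS_PHYS_TO_DMA): PCI devices go through the BAR translation
 * selected above, everything else (on-chip DMA engines) uses physical
 * addresses directly.
 */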
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
#ifdef CONFIG_PCI
	if (dev && dev_is_pci(dev))
		return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
#endif
	return paddr;
}

phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
#ifdef CONFIG_PCI
	if (dev && dev_is_pci(dev))
		return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
#endif
	return daddr;
}

char *octeon_swiotlb;

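/*
 * Set up the SWIOTLB bounce buffer for devices that cannot reach all
 * of memory: walk the memblock ranges to find how much memory there
 * is and how high it goes, size the buffer accordingly, and allocate
 * it from low (DMA-reachable) memory.
 */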
void __init plat_swiotlb_setup(void)
{
	phys_addr_t start, end;
	phys_addr_t max_addr;
	phys_addr_t addr_size;
	size_t swiotlbsize;
	unsigned long swiotlb_nslabs;
	u64 i;

	max_addr = 0;
	addr_size = 0;

	for_each_mem_range(i, &start, &end) {
		/* These addresses map low for PCI. */
		if (start > 0x410000000ull && !OCTEON_IS_OCTEON2())
			continue;

		addr_size += (end - start);

		if (max_addr < end)
			max_addr = end;
	}

	swiotlbsize = PAGE_SIZE;

#ifdef CONFIG_PCI
	/*
	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
	 * size to a maximum of 64MB
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN31XX)
	    || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
		swiotlbsize = addr_size / 4;
		if (swiotlbsize > 64 * (1<<20))
			swiotlbsize = 64 * (1<<20);
	} else if (max_addr > 0xf0000000ul) {
		/*
		 * Otherwise only allocate a big iotlb if there is
		 * memory past the BAR1 hole.
		 */
		swiotlbsize = 64 * (1<<20);
	}
#endif
#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
	/* OCTEON II ohci is only 32-bit. */
	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
		swiotlbsize = 64 * (1<<20);
#endif
	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;

	octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
	if (!octeon_swiotlb)
		panic("%s: Failed to allocate %zu bytes align=%lx\n",
		      __func__, swiotlbsize, PAGE_SIZE);

	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
		panic("Cannot allocate SWIOTLB buffer");
}