1 /* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include "qman_priv.h"
32
/* Portal reserved for DMA-mapping users (first portal initialised wins). */
struct qman_portal *qman_dma_portal;
EXPORT_SYMBOL(qman_dma_portal);

/* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1

/* CPUs that already have an affine portal assigned to them */
static struct cpumask portal_cpus;
/* 0 = still probing, 1 = all portals probed, -1 = a portal probe failed */
static int __qman_portals_probed;
/* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock);
44
/*
 * Bind the portal described by @pcfg to @cpu.  With PAMU support compiled
 * in, allocate an IOMMU paging domain for the portal device, program its L1
 * stash target to @cpu and attach the device; if domain allocation fails we
 * fall back to running without an IOMMU domain and still set the stash
 * destination (SDEST).  If the stash configuration or device attach fails,
 * the domain is freed and SDEST is left unprogrammed (the function exits
 * through the cleanup labels below the `return`).
 */
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
	struct device *dev = pcfg->dev;
	int ret;

	pcfg->iommu_domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(pcfg->iommu_domain)) {
		dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
		/* NULL here means "no domain" for qman_portal_update_sdest() */
		pcfg->iommu_domain = NULL;
		goto no_iommu;
	}
	ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
	if (ret < 0) {
		dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d",
			__func__, ret);
		goto out_domain_free;
	}
	ret = iommu_attach_device(pcfg->iommu_domain, dev);
	if (ret < 0) {
		dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
			ret);
		goto out_domain_free;
	}

no_iommu:
#endif
	qman_set_sdest(pcfg->channel, cpu);

	return;

	/* only reachable via the gotos in the CONFIG_FSL_PAMU section above */
#ifdef CONFIG_FSL_PAMU
out_domain_free:
	iommu_domain_free(pcfg->iommu_domain);
	pcfg->iommu_domain = NULL;
#endif
}
82
init_pcfg(struct qm_portal_config * pcfg)83 static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
84 {
85 struct qman_portal *p;
86 u32 irq_sources = 0;
87
88 /* We need the same LIODN offset for all portals */
89 qman_liodn_fixup(pcfg->channel);
90
91 pcfg->iommu_domain = NULL;
92 portal_set_cpu(pcfg, pcfg->cpu);
93
94 p = qman_create_affine_portal(pcfg, NULL);
95 if (!p) {
96 dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
97 __func__, pcfg->cpu);
98 return NULL;
99 }
100
101 /* Determine what should be interrupt-vs-poll driven */
102 #ifdef CONFIG_FSL_DPA_PIRQ_SLOW
103 irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
104 QM_PIRQ_CSCI;
105 #endif
106 #ifdef CONFIG_FSL_DPA_PIRQ_FAST
107 irq_sources |= QM_PIRQ_DQRI;
108 #endif
109 qman_p_irqsource_add(p, irq_sources);
110
111 spin_lock(&qman_lock);
112 if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
113 /* all assigned portals are initialized now */
114 qman_init_cgr_all();
115 }
116
117 if (!qman_dma_portal)
118 qman_dma_portal = p;
119
120 spin_unlock(&qman_lock);
121
122 dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
123
124 return p;
125 }
126
qman_portal_update_sdest(const struct qm_portal_config * pcfg,unsigned int cpu)127 static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
128 unsigned int cpu)
129 {
130 #ifdef CONFIG_FSL_PAMU /* TODO */
131 if (pcfg->iommu_domain) {
132 if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) {
133 dev_err(pcfg->dev,
134 "Failed to update pamu stash setting\n");
135 return;
136 }
137 }
138 #endif
139 qman_set_sdest(pcfg->channel, cpu);
140 }
141
qman_offline_cpu(unsigned int cpu)142 static int qman_offline_cpu(unsigned int cpu)
143 {
144 struct qman_portal *p;
145 const struct qm_portal_config *pcfg;
146
147 p = affine_portals[cpu];
148 if (p) {
149 pcfg = qman_get_qm_portal_config(p);
150 if (pcfg) {
151 /* select any other online CPU */
152 cpu = cpumask_any_but(cpu_online_mask, cpu);
153 irq_set_affinity(pcfg->irq, cpumask_of(cpu));
154 qman_portal_update_sdest(pcfg, cpu);
155 }
156 }
157 return 0;
158 }
159
qman_online_cpu(unsigned int cpu)160 static int qman_online_cpu(unsigned int cpu)
161 {
162 struct qman_portal *p;
163 const struct qm_portal_config *pcfg;
164
165 p = affine_portals[cpu];
166 if (p) {
167 pcfg = qman_get_qm_portal_config(p);
168 if (pcfg) {
169 irq_set_affinity(pcfg->irq, cpumask_of(cpu));
170 qman_portal_update_sdest(pcfg, cpu);
171 }
172 }
173 return 0;
174 }
175
/*
 * Report portal probe progress to other drivers: 0 while probing is still
 * in progress, 1 once all assigned portals have been probed, -1 if any
 * portal probe failed (see the assignments in qman_portal_probe()).
 */
int qman_portals_probed(void)
{
	return __qman_portals_probed;
}
EXPORT_SYMBOL_GPL(qman_portals_probed);
181
/*
 * Probe one "fsl,qman-portal" device: map its cache-enabled (CE) and
 * cache-inhibited (CI) register regions, claim the first CPU that has no
 * portal yet, and initialise the portal on that CPU.  Once every possible
 * CPU has a portal, any leftover portals are mapped but not initialised,
 * and a post-kexec cleanup pass (frame-queue shutdown) runs if required.
 * On any failure after allocation, __qman_portals_probed is set to -1 so
 * dependent drivers can bail out via qman_portals_probed().
 */
static int qman_portal_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct qm_portal_config *pcfg;
	struct resource *addr_phys[2];
	int irq, cpu, err, i;
	u32 val;

	/* defer until the main QMan device has probed (0 = not yet) */
	err = qman_is_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0) {
		dev_err(&pdev->dev, "failing probe due to qman probe error\n");
		return -ENODEV;
	}

	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
	if (!pcfg) {
		__qman_portals_probed = -1;
		return -ENOMEM;
	}

	pcfg->dev = dev;

	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CE);
	if (!addr_phys[0]) {
		dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
		goto err_ioremap1;
	}

	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CI);
	if (!addr_phys[1]) {
		dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
		goto err_ioremap1;
	}

	/* cell-index is the portal's channel number */
	err = of_property_read_u32(node, "cell-index", &val);
	if (err) {
		dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
		__qman_portals_probed = -1;
		return err;
	}
	pcfg->channel = val;
	pcfg->cpu = -1;	/* not yet assigned to a CPU */
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		goto err_ioremap1;
	pcfg->irq = irq;

	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
					resource_size(addr_phys[0]),
					QBMAN_MEMREMAP_ATTR);
	if (!pcfg->addr_virt_ce) {
		dev_err(dev, "memremap::CE failed\n");
		goto err_ioremap1;
	}

	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
				     resource_size(addr_phys[1]));
	if (!pcfg->addr_virt_ci) {
		dev_err(dev, "ioremap::CI failed\n");
		goto err_ioremap2;
	}

	pcfg->pools = qm_get_pools_sdqcr();

	/* claim the first CPU without a portal; lock guards portal_cpus */
	spin_lock(&qman_lock);
	cpu = cpumask_first_zero(&portal_cpus);
	if (cpu >= nr_cpu_ids) {
		__qman_portals_probed = 1;
		/* unassigned portal, skip init */
		spin_unlock(&qman_lock);
		goto check_cleanup;
	}

	cpumask_set_cpu(cpu, &portal_cpus);
	spin_unlock(&qman_lock);
	pcfg->cpu = cpu;

	if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
		dev_err(dev, "dma_set_mask() failed\n");
		goto err_portal_init;
	}

	if (!init_pcfg(pcfg)) {
		dev_err(dev, "portal init failed\n");
		goto err_portal_init;
	}

	/* clear irq affinity if assigned cpu is offline */
	if (!cpu_online(cpu))
		qman_offline_cpu(cpu);

check_cleanup:
	if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
		/*
		 * QMan wasn't reset prior to boot (Kexec for example)
		 * Empty all the frame queues so they are in reset state
		 */
		for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
			err = qman_shutdown_fq(i);
			if (err) {
				dev_err(dev, "Failed to shutdown frame queue %d\n",
					i);
				goto err_portal_init;
			}
		}
		qman_done_cleanup();
	}

	return 0;

	/* unwind in reverse order of acquisition; pcfg itself is devm-managed */
err_portal_init:
	iounmap(pcfg->addr_virt_ci);
err_ioremap2:
	memunmap(pcfg->addr_virt_ce);
err_ioremap1:
	__qman_portals_probed = -1;

	return -ENXIO;
}
306
/* Device-tree match table: one entry per QMan software portal node. */
static const struct of_device_id qman_portal_ids[] = {
	{
		.compatible = "fsl,qman-portal",
	},
	{}
};
MODULE_DEVICE_TABLE(of, qman_portal_ids);

static struct platform_driver qman_portal_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qman_portal_ids,
	},
	.probe = qman_portal_probe,
};
322
qman_portal_driver_register(struct platform_driver * drv)323 static int __init qman_portal_driver_register(struct platform_driver *drv)
324 {
325 int ret;
326
327 ret = platform_driver_register(drv);
328 if (ret < 0)
329 return ret;
330
331 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
332 "soc/qman_portal:online",
333 qman_online_cpu, qman_offline_cpu);
334 if (ret < 0) {
335 pr_err("qman: failed to register hotplug callbacks.\n");
336 platform_driver_unregister(drv);
337 return ret;
338 }
339 return 0;
340 }
341
/* Hook module init/exit through the custom __init registration helper. */
module_driver(qman_portal_driver,
	      qman_portal_driver_register, platform_driver_unregister);
344