/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */ 30 31 #include "qman_priv.h" 32 33 /* Enable portal interupts (as opposed to polling mode) */ 34 #define CONFIG_FSL_DPA_PIRQ_SLOW 1 35 #define CONFIG_FSL_DPA_PIRQ_FAST 1 36 37 static struct cpumask portal_cpus; 38 /* protect qman global registers and global data shared among portals */ 39 static DEFINE_SPINLOCK(qman_lock); 40 41 static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) 42 { 43 #ifdef CONFIG_FSL_PAMU 44 struct device *dev = pcfg->dev; 45 int window_count = 1; 46 struct iommu_domain_geometry geom_attr; 47 struct pamu_stash_attribute stash_attr; 48 int ret; 49 50 pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); 51 if (!pcfg->iommu_domain) { 52 dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); 53 goto no_iommu; 54 } 55 geom_attr.aperture_start = 0; 56 geom_attr.aperture_end = 57 ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1; 58 geom_attr.force_aperture = true; 59 ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY, 60 &geom_attr); 61 if (ret < 0) { 62 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, 63 ret); 64 goto out_domain_free; 65 } 66 ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, 67 &window_count); 68 if (ret < 0) { 69 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, 70 ret); 71 goto out_domain_free; 72 } 73 stash_attr.cpu = cpu; 74 stash_attr.cache = PAMU_ATTR_CACHE_L1; 75 ret = iommu_domain_set_attr(pcfg->iommu_domain, 76 DOMAIN_ATTR_FSL_PAMU_STASH, 77 &stash_attr); 78 if (ret < 0) { 79 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", 80 __func__, ret); 81 goto out_domain_free; 82 } 83 ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, 84 IOMMU_READ | IOMMU_WRITE); 85 if (ret < 0) { 86 dev_err(dev, "%s(): iommu_domain_window_enable() = %d", 87 __func__, ret); 88 goto out_domain_free; 89 } 90 ret = iommu_attach_device(pcfg->iommu_domain, dev); 91 if (ret < 0) { 92 dev_err(dev, "%s(): iommu_device_attach() = 
%d", __func__, 93 ret); 94 goto out_domain_free; 95 } 96 ret = iommu_domain_set_attr(pcfg->iommu_domain, 97 DOMAIN_ATTR_FSL_PAMU_ENABLE, 98 &window_count); 99 if (ret < 0) { 100 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, 101 ret); 102 goto out_detach_device; 103 } 104 105 no_iommu: 106 #endif 107 qman_set_sdest(pcfg->channel, cpu); 108 109 return; 110 111 #ifdef CONFIG_FSL_PAMU 112 out_detach_device: 113 iommu_detach_device(pcfg->iommu_domain, NULL); 114 out_domain_free: 115 iommu_domain_free(pcfg->iommu_domain); 116 pcfg->iommu_domain = NULL; 117 #endif 118 } 119 120 static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) 121 { 122 struct qman_portal *p; 123 u32 irq_sources = 0; 124 125 /* We need the same LIODN offset for all portals */ 126 qman_liodn_fixup(pcfg->channel); 127 128 pcfg->iommu_domain = NULL; 129 portal_set_cpu(pcfg, pcfg->cpu); 130 131 p = qman_create_affine_portal(pcfg, NULL); 132 if (!p) { 133 dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n", 134 __func__, pcfg->cpu); 135 return NULL; 136 } 137 138 /* Determine what should be interrupt-vs-poll driven */ 139 #ifdef CONFIG_FSL_DPA_PIRQ_SLOW 140 irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI | 141 QM_PIRQ_CSCI; 142 #endif 143 #ifdef CONFIG_FSL_DPA_PIRQ_FAST 144 irq_sources |= QM_PIRQ_DQRI; 145 #endif 146 qman_p_irqsource_add(p, irq_sources); 147 148 spin_lock(&qman_lock); 149 if (cpumask_equal(&portal_cpus, cpu_possible_mask)) { 150 /* all assigned portals are initialized now */ 151 qman_init_cgr_all(); 152 } 153 spin_unlock(&qman_lock); 154 155 dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); 156 157 return p; 158 } 159 160 static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, 161 unsigned int cpu) 162 { 163 #ifdef CONFIG_FSL_PAMU /* TODO */ 164 struct pamu_stash_attribute stash_attr; 165 int ret; 166 167 if (pcfg->iommu_domain) { 168 stash_attr.cpu = cpu; 169 stash_attr.cache = PAMU_ATTR_CACHE_L1; 170 ret = 
iommu_domain_set_attr(pcfg->iommu_domain, 171 DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); 172 if (ret < 0) { 173 dev_err(pcfg->dev, 174 "Failed to update pamu stash setting\n"); 175 return; 176 } 177 } 178 #endif 179 qman_set_sdest(pcfg->channel, cpu); 180 } 181 182 static int qman_offline_cpu(unsigned int cpu) 183 { 184 struct qman_portal *p; 185 const struct qm_portal_config *pcfg; 186 187 p = affine_portals[cpu]; 188 if (p) { 189 pcfg = qman_get_qm_portal_config(p); 190 if (pcfg) { 191 irq_set_affinity(pcfg->irq, cpumask_of(0)); 192 qman_portal_update_sdest(pcfg, 0); 193 } 194 } 195 return 0; 196 } 197 198 static int qman_online_cpu(unsigned int cpu) 199 { 200 struct qman_portal *p; 201 const struct qm_portal_config *pcfg; 202 203 p = affine_portals[cpu]; 204 if (p) { 205 pcfg = qman_get_qm_portal_config(p); 206 if (pcfg) { 207 irq_set_affinity(pcfg->irq, cpumask_of(cpu)); 208 qman_portal_update_sdest(pcfg, cpu); 209 } 210 } 211 return 0; 212 } 213 214 static int qman_portal_probe(struct platform_device *pdev) 215 { 216 struct device *dev = &pdev->dev; 217 struct device_node *node = dev->of_node; 218 struct qm_portal_config *pcfg; 219 struct resource *addr_phys[2]; 220 const u32 *channel; 221 void __iomem *va; 222 int irq, len, cpu; 223 224 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); 225 if (!pcfg) 226 return -ENOMEM; 227 228 pcfg->dev = dev; 229 230 addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, 231 DPAA_PORTAL_CE); 232 if (!addr_phys[0]) { 233 dev_err(dev, "Can't get %s property 'reg::CE'\n", 234 node->full_name); 235 return -ENXIO; 236 } 237 238 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, 239 DPAA_PORTAL_CI); 240 if (!addr_phys[1]) { 241 dev_err(dev, "Can't get %s property 'reg::CI'\n", 242 node->full_name); 243 return -ENXIO; 244 } 245 246 channel = of_get_property(node, "cell-index", &len); 247 if (!channel || (len != 4)) { 248 dev_err(dev, "Can't get %s property 'cell-index'\n", 249 node->full_name); 250 return -ENXIO; 251 
} 252 pcfg->channel = *channel; 253 pcfg->cpu = -1; 254 irq = platform_get_irq(pdev, 0); 255 if (irq <= 0) { 256 dev_err(dev, "Can't get %s IRQ\n", node->full_name); 257 return -ENXIO; 258 } 259 pcfg->irq = irq; 260 261 va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); 262 if (!va) 263 goto err_ioremap1; 264 265 pcfg->addr_virt[DPAA_PORTAL_CE] = va; 266 267 va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), 268 _PAGE_GUARDED | _PAGE_NO_CACHE); 269 if (!va) 270 goto err_ioremap2; 271 272 pcfg->addr_virt[DPAA_PORTAL_CI] = va; 273 274 pcfg->pools = qm_get_pools_sdqcr(); 275 276 spin_lock(&qman_lock); 277 cpu = cpumask_next_zero(-1, &portal_cpus); 278 if (cpu >= nr_cpu_ids) { 279 /* unassigned portal, skip init */ 280 spin_unlock(&qman_lock); 281 return 0; 282 } 283 284 cpumask_set_cpu(cpu, &portal_cpus); 285 spin_unlock(&qman_lock); 286 pcfg->cpu = cpu; 287 288 if (!init_pcfg(pcfg)) 289 goto err_ioremap2; 290 291 /* clear irq affinity if assigned cpu is offline */ 292 if (!cpu_online(cpu)) 293 qman_offline_cpu(cpu); 294 295 return 0; 296 297 err_ioremap2: 298 iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); 299 err_ioremap1: 300 dev_err(dev, "ioremap failed\n"); 301 return -ENXIO; 302 } 303 304 static const struct of_device_id qman_portal_ids[] = { 305 { 306 .compatible = "fsl,qman-portal", 307 }, 308 {} 309 }; 310 MODULE_DEVICE_TABLE(of, qman_portal_ids); 311 312 static struct platform_driver qman_portal_driver = { 313 .driver = { 314 .name = KBUILD_MODNAME, 315 .of_match_table = qman_portal_ids, 316 }, 317 .probe = qman_portal_probe, 318 }; 319 320 static int __init qman_portal_driver_register(struct platform_driver *drv) 321 { 322 int ret; 323 324 ret = platform_driver_register(drv); 325 if (ret < 0) 326 return ret; 327 328 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 329 "soc/qman_portal:online", 330 qman_online_cpu, qman_offline_cpu); 331 if (ret < 0) { 332 pr_err("qman: failed to register hotplug callbacks.\n"); 333 
platform_driver_unregister(drv); 334 return ret; 335 } 336 return 0; 337 } 338 339 module_driver(qman_portal_driver, 340 qman_portal_driver_register, platform_driver_unregister); 341