// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 */


#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/of_irq.h>

#include <asm/dcr.h>
#include <asm/machdep.h>

#include "cell.h"

/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10


struct axon_msic {
	struct irq_domain *irq_domain;
	__le32 *fifo_virt;
	dma_addr_t fifo_phys;
	dcr_host_t dcr_host;
	u32 read_offset;
#ifdef DEBUG
	u32 __iomem *trigger;
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif


static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);

	dcr_write(msic->dcr_host, dcr_n, val);
}

static void axon_msi_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_desc_get_handler_data(desc);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx = msic->read_offset / sizeof(__le32);
		msi = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			 write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}

static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
			dn);
		goto out_error;
	}

	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}

static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		if (!dev->no_64bit_msi) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		of_node_put(dn);
		return -ENOENT;
	}

	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}

static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int virq, rc;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
		virq = irq_create_direct_mapping(msic->irq_domain);
		if (!virq) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -1;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		msg.data = virq;
		pci_write_msi_msg(virq, &msg);
	}

	return 0;
}

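/*
 * Descriptive comment (not in the original source): undo
 * axon_msi_setup_msi_irqs() by detaching each MSI descriptor from its
 * virq and disposing of the mapping in the MSIC's irq_domain.
 */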
static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		entry->irq = 0;
	}
}

static struct irq_chip msic_irq_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_shutdown	= pci_msi_mask_irq,
	.name		= "AXON-MSI",
};

static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};

static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %pOF\n",
		 irq_domain_get_of_node(msic->irq_domain));
	tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}

static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %pOF\n", dn);

	msic = kzalloc(sizeof(*msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
		       dn);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %pOF\n",
		       dn);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
		       dn);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
		       dn);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (!virq) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
		       dn);
		goto out_free_fifo;
	}
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
	if (!msic->irq_domain) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
		       dn);
		goto out_free_fifo;
	}

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
		       msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
		       MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
		       MSIC_CTRL_FIFO_SIZE);

	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

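	/*
	 * Descriptive comment (not in the original source): the MSIC is now
	 * live, so route MSI setup/teardown for Cell PCI buses through this
	 * driver from here on.
	 */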
	cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
	cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:

	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver = {
		.name = "axon-msi",
		.of_match_table = axon_msi_device_id,
	},
};

static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);


#ifdef DEBUG
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	u64 addr;

	addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");
		return;
	}

	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic);
}
#endif /* DEBUG */