/*
 * arch/ppc64/kernel/pSeries_iommu.c
 *
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/pSeries_reconfig.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>

#include "plpar_wrappers.h"

#define DBG(fmt...)

extern int is_python(struct device_node *);

/* Build TCEs by writing directly into the TCE table (non-LPAR case). */
static void tce_build_pSeries(struct iommu_table *tbl, long index,
                              long npages, unsigned long uaddr,
                              enum dma_data_direction direction)
{
        union tce_entry t;
        union tce_entry *tp;

        index <<= TCE_PAGE_FACTOR;
        npages <<= TCE_PAGE_FACTOR;

        t.te_word = 0;
        t.te_rdwr = 1; /* Read allowed */

        if (direction != DMA_TO_DEVICE)
                t.te_pciwr = 1;

        tp = ((union tce_entry *)tbl->it_base) + index;

        while (npages--) {
                /* can't move this out since we might cross LMB boundary */
                t.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;

                tp->te_word = t.te_word;

                uaddr += TCE_PAGE_SIZE;
                tp++;
        }
}


static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
        union tce_entry t;
        union tce_entry *tp;

        npages <<= TCE_PAGE_FACTOR;
        index <<= TCE_PAGE_FACTOR;

        t.te_word = 0;
        tp = ((union tce_entry *)tbl->it_base) + index;

        while (npages--) {
                tp->te_word = t.te_word;

                tp++;
        }
}


/* Build TCEs one entry at a time through the hypervisor (LPAR case). */
static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                long npages, unsigned long uaddr,
                                enum dma_data_direction direction)
{
        u64 rc;
        union tce_entry tce;

        tcenum <<= TCE_PAGE_FACTOR;
        npages <<= TCE_PAGE_FACTOR;

        tce.te_word = 0;
        tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
        tce.te_rdwr = 1;
        if (direction != DMA_TO_DEVICE)
                tce.te_pciwr = 1;

        while (npages--) {
                rc = plpar_tce_put((u64)tbl->it_index,
                                   (u64)tcenum << 12,
                                   tce.te_word);

                if (rc && printk_ratelimit()) {
                        printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
                        printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
                        printk("\ttcenum = 0x%lx\n", (u64)tcenum);
                        printk("\ttce val = 0x%lx\n", tce.te_word);
                        show_stack(current, (unsigned long *)__get_SP());
                }

                tcenum++;
                tce.te_rpn++;
        }
}

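/*
 * Per-CPU scratch page used by tce_buildmulti_pSeriesLP() to batch TCE
 * entries for a single indirect put through the hypervisor.  If the page
 * cannot be allocated, the code falls back to the one-entry-at-a-time
 * path above.
 */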
static DEFINE_PER_CPU(void *, tce_page) = NULL;

static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                     long npages, unsigned long uaddr,
                                     enum dma_data_direction direction)
{
        u64 rc;
        union tce_entry tce, *tcep;
        long l, limit;

        if (TCE_PAGE_FACTOR == 0 && npages == 1)
                return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                           direction);

        tcep = __get_cpu_var(tce_page);

        /* This is safe to do since interrupts are off when we're called
         * from iommu_alloc{,_sg}()
         */
        if (!tcep) {
                tcep = (void *)__get_free_page(GFP_ATOMIC);
                /* If allocation fails, fall back to the loop implementation */
                if (!tcep)
                        return tce_build_pSeriesLP(tbl, tcenum, npages,
                                                   uaddr, direction);
                __get_cpu_var(tce_page) = tcep;
        }

        tcenum <<= TCE_PAGE_FACTOR;
        npages <<= TCE_PAGE_FACTOR;

        tce.te_word = 0;
        tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
        tce.te_rdwr = 1;
        if (direction != DMA_TO_DEVICE)
                tce.te_pciwr = 1;

        /* We can map max one pageful of TCEs at a time */
        do {
                /*
                 * Set up the page with TCE data, looping through and setting
                 * the values.
                 */
                limit = min_t(long, npages, 4096/sizeof(union tce_entry));

                for (l = 0; l < limit; l++) {
                        tcep[l] = tce;
                        tce.te_rpn++;
                }

                rc = plpar_tce_put_indirect((u64)tbl->it_index,
                                            (u64)tcenum << 12,
                                            (u64)virt_to_abs(tcep),
                                            limit);

                npages -= limit;
                tcenum += limit;
        } while (npages > 0 && !rc);

        if (rc && printk_ratelimit()) {
                printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
                printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
                printk("\tnpages = 0x%lx\n", (u64)npages);
                printk("\ttce[0] val = 0x%lx\n", tcep[0].te_word);
                show_stack(current, (unsigned long *)__get_SP());
        }
}

/* Clear TCEs one entry at a time through the hypervisor. */
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
        u64 rc;
        union tce_entry tce;

        tcenum <<= TCE_PAGE_FACTOR;
        npages <<= TCE_PAGE_FACTOR;

        tce.te_word = 0;

        while (npages--) {
                rc = plpar_tce_put((u64)tbl->it_index,
                                   (u64)tcenum << 12,
                                   tce.te_word);

                if (rc && printk_ratelimit()) {
                        printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
                        printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
                        printk("\ttcenum = 0x%lx\n", (u64)tcenum);
                        printk("\ttce val = 0x%lx\n", tce.te_word);
                        show_stack(current, (unsigned long *)__get_SP());
                }

                tcenum++;
        }
}

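/*
 * Clear a whole range of TCEs with a single "stuff" call into the
 * hypervisor rather than one put per entry.
 */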
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
        u64 rc;
        union tce_entry tce;

        tcenum <<= TCE_PAGE_FACTOR;
        npages <<= TCE_PAGE_FACTOR;

        tce.te_word = 0;

        rc = plpar_tce_stuff((u64)tbl->it_index,
                             (u64)tcenum << 12,
                             tce.te_word,
                             npages);

        if (rc && printk_ratelimit()) {
                printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
                printk("\trc = %ld\n", rc);
                printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
                printk("\tnpages = 0x%lx\n", (u64)npages);
                printk("\ttce val = 0x%lx\n", tce.te_word);
                show_stack(current, (unsigned long *)__get_SP());
        }
}

static void iommu_table_setparms(struct pci_controller *phb,
                                 struct device_node *dn,
                                 struct iommu_table *tbl)
{
        struct device_node *node;
        unsigned long *basep;
        unsigned int *sizep;

        node = (struct device_node *)phb->arch_data;

        basep = (unsigned long *)get_property(node, "linux,tce-base", NULL);
        sizep = (unsigned int *)get_property(node, "linux,tce-size", NULL);
        if (basep == NULL || sizep == NULL) {
                printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
                                "missing tce entries !\n", dn->full_name);
                return;
        }

        tbl->it_base = (unsigned long)__va(*basep);
        memset((void *)tbl->it_base, 0, *sizep);

        tbl->it_busno = phb->bus->number;

        /* Units of tce entries */
        tbl->it_offset = phb->dma_window_base_cur >> PAGE_SHIFT;

        /* Test if we are going over 2GB of DMA space */
        if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
                udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
                panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
        }

        phb->dma_window_base_cur += phb->dma_window_size;

        /* Set the tce table size - measured in entries */
        tbl->it_size = phb->dma_window_size >> PAGE_SHIFT;

        tbl->it_index = 0;
        tbl->it_blocksize = 16;
        tbl->it_type = TCE_PCI;
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 *
 * ToDo: properly interpret the ibm,dma-window property.  The definition is:
 *      logical-bus-number      (1 word)
 *      phys-address            (#address-cells words)
 *      size                    (#cell-size words)
 *
 * Currently we hard code these sizes (more or less).
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
                                      struct device_node *dn,
                                      struct iommu_table *tbl,
                                      unsigned int *dma_window)
{
        tbl->it_busno = PCI_DN(dn)->bussubno;

        /* TODO: Parse field size properties properly. */
        tbl->it_size = (((unsigned long)dma_window[4] << 32) |
                        (unsigned long)dma_window[5]) >> PAGE_SHIFT;
        tbl->it_offset = (((unsigned long)dma_window[2] << 32) |
                          (unsigned long)dma_window[3]) >> PAGE_SHIFT;
        tbl->it_base = 0;
        tbl->it_index = dma_window[0];
        tbl->it_blocksize = 16;
        tbl->it_type = TCE_PCI;
}

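/*
 * Worked example of the decode above (hypothetical cell values, not taken
 * from real firmware): with dma_window[] = { 0x800, 0, 0, 0, 0x1, 0 } the
 * code yields it_index = 0x800, it_offset = 0 and
 * it_size = 0x100000000 >> PAGE_SHIFT, i.e. a 4GB window starting at bus
 * DMA address 0 (0x100000 entries with 4K pages).
 */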
static void iommu_bus_setup_pSeries(struct pci_bus *bus)
{
        struct device_node *dn;
        struct iommu_table *tbl;
        struct device_node *isa_dn, *isa_dn_orig;
        struct device_node *tmp;
        struct pci_dn *pci;
        int children;

        DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self);

        dn = pci_bus_to_OF_node(bus);
        pci = PCI_DN(dn);

        if (bus->self) {
                /* This is not a root bus, any setup will be done for the
                 * device-side of the bridge in iommu_dev_setup_pSeries().
                 */
                return;
        }

        /* Check if the ISA bus on the system is under
         * this PHB.
         */
        isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

        while (isa_dn && isa_dn != dn)
                isa_dn = isa_dn->parent;

        if (isa_dn_orig)
                of_node_put(isa_dn_orig);

        /* Count number of direct PCI children of the PHB.
         * All PCI device nodes have class-code property, so it's
         * an easy way to find them.
         */
        for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
                if (get_property(tmp, "class-code", NULL))
                        children++;

        DBG("Children: %d\n", children);

        /* Calculate amount of DMA window per slot. Each window must be
         * a power of two (due to pci_alloc_consistent requirements).
         *
         * Keep 256MB aside for PHBs with ISA.
         */

        if (!isa_dn) {
                /* No ISA/IDE - just set window size and return */
                pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

                while (pci->phb->dma_window_size * children > 0x80000000ul)
                        pci->phb->dma_window_size >>= 1;
                DBG("No ISA/IDE, window size is 0x%lx\n",
                    pci->phb->dma_window_size);
                pci->phb->dma_window_base_cur = 0;

                return;
        }

        /* If we have ISA, then we probably have an IDE
         * controller too. Allocate a 128MB table but
         * skip the first 128MB to avoid stepping on ISA
         * space.
         */
        pci->phb->dma_window_size = 0x8000000ul;
        pci->phb->dma_window_base_cur = 0x8000000ul;

        tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);

        iommu_table_setparms(pci->phb, dn, tbl);
        pci->iommu_table = iommu_init_table(tbl);

        /* Divide the rest (1.75GB) among the children */
        pci->phb->dma_window_size = 0x80000000ul;
        while (pci->phb->dma_window_size * children > 0x70000000ul)
                pci->phb->dma_window_size >>= 1;

        DBG("ISA/IDE, window size is 0x%lx\n", pci->phb->dma_window_size);
}

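/*
 * Illustration of the division above (example numbers, not from the
 * source): with three PCI children under a PHB and no ISA bus, the loop
 * halves the 2GB window until 3 * size fits in 2GB, settling on 512MB per
 * slot; with ISA present the same loop runs against the remaining 1.75GB.
 */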
static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
{
        struct iommu_table *tbl;
        struct device_node *dn, *pdn;
        struct pci_dn *ppci;
        unsigned int *dma_window = NULL;

        DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);

        dn = pci_bus_to_OF_node(bus);

        /* Find nearest ibm,dma-window, walking up the device tree */
        for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
                dma_window = (unsigned int *)get_property(pdn, "ibm,dma-window", NULL);
                if (dma_window != NULL)
                        break;
        }

        if (dma_window == NULL) {
                DBG("iommu_bus_setup_pSeriesLP: bus %s seems to have no ibm,dma-window property\n", dn->full_name);
                return;
        }

        ppci = pdn->data;
        if (!ppci->iommu_table) {
                /* Bussubno hasn't been copied yet.
                 * Do it now because iommu_table_setparms_lpar needs it.
                 */
                ppci->bussubno = bus->number;

                tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);

                iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);

                ppci->iommu_table = iommu_init_table(tbl);
        }

        if (pdn != dn)
                PCI_DN(dn)->iommu_table = ppci->iommu_table;
}


static void iommu_dev_setup_pSeries(struct pci_dev *dev)
{
        struct device_node *dn, *mydn;
        struct iommu_table *tbl;

        DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev));

        mydn = dn = pci_device_to_OF_node(dev);

        /* If we're the direct child of a root bus, then we need to allocate
         * an iommu table ourselves. The bus setup code should have setup
         * the window sizes already.
         */
        if (!dev->bus->self) {
                DBG(" --> first child, no bridge. Allocating iommu table.\n");
                tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
                iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
                PCI_DN(mydn)->iommu_table = iommu_init_table(tbl);

                return;
        }

        /* If this device is further down the bus tree, search upwards until
         * an already allocated iommu table is found and use that.
         */
        while (dn && dn->data && PCI_DN(dn)->iommu_table == NULL)
                dn = dn->parent;

        if (dn && dn->data) {
                PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
        } else {
                DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n",
                    dev, pci_name(dev));
        }
}

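/*
 * Dynamic reconfiguration: when a device node that owns a DMA window is
 * removed, free its iommu table so the window can be reused.
 */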
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
        int err = NOTIFY_OK;
        struct device_node *np = node;
        struct pci_dn *pci = np->data;

        switch (action) {
        case PSERIES_RECONFIG_REMOVE:
                if (pci && pci->iommu_table &&
                    get_property(np, "ibm,dma-window", NULL))
                        iommu_free_table(np);
                break;
        default:
                err = NOTIFY_DONE;
                break;
        }
        return err;
}

static struct notifier_block iommu_reconfig_nb = {
        .notifier_call = iommu_reconfig_notifier,
};

static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
{
        struct device_node *pdn, *dn;
        struct iommu_table *tbl;
        unsigned int *dma_window = NULL;
        struct pci_dn *pci;

        DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev));

        /* dev setup for LPAR is a little tricky, since the device tree might
         * contain the dma-window properties per-device and not necessarily
         * for the bus. So we need to search upwards in the tree until we
         * either hit a dma-window property, OR find a parent with a table
         * already allocated.
         */
        dn = pci_device_to_OF_node(dev);

        for (pdn = dn; pdn && pdn->data && !PCI_DN(pdn)->iommu_table;
             pdn = pdn->parent) {
                dma_window = (unsigned int *)
                        get_property(pdn, "ibm,dma-window", NULL);
                if (dma_window)
                        break;
        }

        /* Check for parent == NULL so we don't try to setup the empty EADS
         * slots on POWER4 machines.
         */
        if (dma_window == NULL || pdn->parent == NULL) {
                DBG("No dma window for device, linking to parent\n");
                PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table;
                return;
        } else {
                DBG("Found DMA window, allocating table\n");
        }

        pci = pdn->data;
        if (!pci->iommu_table) {
                /* iommu_table_setparms_lpar needs bussubno. */
                pci->bussubno = pci->phb->bus->number;

                tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);

                iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);

                pci->iommu_table = iommu_init_table(tbl);
        }

        if (pdn != dn)
                PCI_DN(dn)->iommu_table = pci->iommu_table;
}

static void iommu_bus_setup_null(struct pci_bus *b) { }
static void iommu_dev_setup_null(struct pci_dev *d) { }

/* These are called very early. */
void iommu_init_early_pSeries(void)
{
        if (of_chosen && get_property(of_chosen, "linux,iommu-off", NULL)) {
                /* Direct I/O, IOMMU off */
                ppc_md.iommu_dev_setup = iommu_dev_setup_null;
                ppc_md.iommu_bus_setup = iommu_bus_setup_null;
                pci_direct_iommu_init();

                return;
        }

        if (platform_is_lpar()) {
                if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
                        ppc_md.tce_build = tce_buildmulti_pSeriesLP;
                        ppc_md.tce_free = tce_freemulti_pSeriesLP;
                } else {
                        ppc_md.tce_build = tce_build_pSeriesLP;
                        ppc_md.tce_free = tce_free_pSeriesLP;
                }
                ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP;
                ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP;
        } else {
                ppc_md.tce_build = tce_build_pSeries;
                ppc_md.tce_free = tce_free_pSeries;
                ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries;
                ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;
        }

        pSeries_reconfig_notifier_register(&iommu_reconfig_nb);

        pci_iommu_init();
}