/*
 * Serverworks AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include "agp.h"

#define SVWRKS_COMMAND		0x04
#define SVWRKS_APSIZE		0x10
#define SVWRKS_MMBASE		0x14
#define SVWRKS_CACHING		0x4b
#define SVWRKS_AGP_ENABLE	0x60
#define SVWRKS_FEATURE		0x68

#define SVWRKS_SIZE_MASK	0xfe000000

/* Memory mapped registers */
#define SVWRKS_GART_CACHE	0x02
#define SVWRKS_GATTBASE		0x04
#define SVWRKS_TLBFLUSH		0x10
#define SVWRKS_POSTFLUSH	0x14
#define SVWRKS_DIRFLUSH		0x0c


struct serverworks_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

static struct _serverworks_private {
	struct pci_dev *svrwrks_dev;	/* device 0, function 1 of the chipset */
	volatile u8 __iomem *registers;
	struct serverworks_page_map **gatt_pages;
	int num_tables;
	struct serverworks_page_map scratch_dir;

	int gart_addr_ofs;
	int mm_addr_ofs;
} serverworks_private;

static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		writel(agp_bridge->scratch_page, page_map->remapped+i);

	return 0;
}

static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}

static void serverworks_free_gatt_pages(void)
{
	int i;
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;

	tables = serverworks_private.gatt_pages;
	for (i = 0; i < serverworks_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				serverworks_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}

static int serverworks_create_gatt_pages(int nr_tables)
{
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;
	int retval = 0;
	int i;

	tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
			 GFP_KERNEL);
	if (tables == NULL) {
		return -ENOMEM;
	}
	memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1));
	for (i = 0; i < nr_tables; i++) {
		entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		memset(entry, 0, sizeof(struct serverworks_page_map));
		tables[i] = entry;
		retval = serverworks_create_page_map(entry);
		if (retval != 0)
			break;
	}
	serverworks_private.num_tables = nr_tables;
	serverworks_private.gatt_pages = tables;

	if (retval != 0)
		serverworks_free_gatt_pages();

	return retval;
}

#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif
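
/*
 * Illustrative sketch only (not used by the driver): how the macros above
 * turn a GART bus address into a PTE location for the two-level walk done
 * by serverworks_insert_memory() below.  Assuming, purely as an example,
 * an aperture based at 0xf0000000, the address 0xf0042000 decomposes as
 *
 *	GET_PAGE_DIR_IDX(0xf0042000) = (0xf0042000 >> 22) - (0xf0000000 >> 22) = 0
 *	GET_GATT_OFF(0xf0042000)     = (0xf0042000 & 0x003ff000) >> 12        = 0x42
 *
 * i.e. directory slot 0, entry 0x42 of that GATT page.
 */
static inline unsigned long __iomem *
svrwrks_pte_location(unsigned long addr)
{
	return SVRWRKS_GET_GATT(addr) + GET_GATT_OFF(addr);
}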
static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct serverworks_page_map page_dir;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = serverworks_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		return retval;
	}
	/* Create a fake scratch directory */
	for (i = 0; i < 1024; i++) {
		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
		writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
	}

	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		serverworks_free_page_map(&serverworks_private.scratch_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, because it is
	 * used to program the AGP master, not the CPU.
	 */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* Point each page directory entry at its GATT page (bit 0 = valid). */
	for (i = 0; i < value->num_entries / 1024; i++)
		writel(virt_to_gart(serverworks_private.gatt_pages[i]->real) | 1, page_dir.remapped+i);

	return 0;
}

static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct serverworks_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	serverworks_free_gatt_pages();
	serverworks_free_page_map(&page_dir);
	serverworks_free_page_map(&serverworks_private.scratch_dir);
	return 0;
}

static int serverworks_fetch_size(void)
{
	int i;
	u32 temp;
	u32 temp2;
	struct aper_size_info_lvl2 *values;

	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
	pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs,
			       SVWRKS_SIZE_MASK);
	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp2);
	pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, temp);
	temp2 &= SVWRKS_SIZE_MASK;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp2 == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
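
/*
 * Worked example (illustration only): serverworks_fetch_size() sizes the
 * aperture the same way a PCI BAR is sized - save the register, write the
 * all-ones mask (SVWRKS_SIZE_MASK), read back which bits stuck, then restore
 * the original value.  A read-back of 0xf0000000 would match the
 * {256, 65536, 0xf0000000} entry of serverworks_sizes[] below, i.e. a
 * 256 MB aperture covered by 65536 GATT entries, so the function returns 256.
 */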
/*
 * This routine could be implemented by taking the addresses
 * written to the GATT and flushing them individually.  However,
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can be a large number
 * of entries.
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1)
		cpu_relax();

	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
	while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1)
		cpu_relax();
}

static int serverworks_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u8 enable_reg;
	u16 cap_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!serverworks_private.registers) {
		printk(KERN_ERR PFX "Unable to ioremap() memory.\n");
		return -ENOMEM;
	}

	writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
	readb(serverworks_private.registers+SVWRKS_GART_CACHE);	/* PCI Posting. */

	writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
	readl(serverworks_private.registers+SVWRKS_GATTBASE);	/* PCI Posting. */

	cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
	cap_reg &= ~0x0007;
	cap_reg |= 0x4;
	writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
	readw(serverworks_private.registers+SVWRKS_COMMAND);	/* PCI Posting. */

	pci_read_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, &enable_reg);
	enable_reg |= 0x1;	/* AGP Enable bit */
	pci_write_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, enable_reg);
	serverworks_tlbflush(NULL);

	agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

	/* Fill in the mode register */
	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
	enable_reg &= ~0x3;
	pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
	enable_reg |= (1<<6);
	pci_write_config_byte(agp_bridge->dev, SVWRKS_FEATURE, enable_reg);

	return 0;
}
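
/*
 * Minimal sketch (hypothetical helper, not used by the driver) of the
 * write-then-read-back idiom that serverworks_configure() applies inline
 * above: reading the same register back forces the posted MMIO write out of
 * the PCI posting buffers before the next step runs.
 */
static inline void svrwrks_writeb_flushed(u8 val, unsigned long reg)
{
	writeb(val, serverworks_private.registers + reg);
	readb(serverworks_private.registers + reg);	/* flush PCI posting */
}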
static void serverworks_cleanup(void)
{
	iounmap((void __iomem *) serverworks_private.registers);
}

static int serverworks_insert_memory(struct agp_memory *mem,
				     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type),
		       cur_gatt+GET_GATT_OFF(addr));
	}
	serverworks_tlbflush(mem);
	return 0;
}

static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	global_cache_flush();
	serverworks_tlbflush(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	serverworks_tlbflush(mem);
	return 0;
}

static struct gatt_mask serverworks_masks[] =
{
	{.mask = 1, .type = 0}
};

static struct aper_size_info_lvl2 serverworks_sizes[7] =
{
	{2048, 524288, 0x80000000},
	{1024, 262144, 0xc0000000},
	{512, 131072, 0xe0000000},
	{256, 65536, 0xf0000000},
	{128, 32768, 0xf8000000},
	{64, 16384, 0xfc000000},
	{32, 8192, 0xfe000000}
};

static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	u32 command;

	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      bridge->capndx + PCI_AGP_STATUS,
			      &command);

	command = agp_collect_device_status(bridge, mode, command);

	command &= ~0x10;	/* disable FW */
	command &= ~0x08;

	command |= 0x100;	/* set the AGP enable bit */

	pci_write_config_dword(serverworks_private.svrwrks_dev,
			       bridge->capndx + PCI_AGP_COMMAND,
			       command);

	agp_device_command(command, 0);
}

static struct agp_bridge_driver sworks_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = serverworks_sizes,
	.size_type = LVL2_APER_SIZE,
	.num_aperture_sizes = 7,
	.configure = serverworks_configure,
	.fetch_size = serverworks_fetch_size,
	.cleanup = serverworks_cleanup,
	.tlb_flush = serverworks_tlbflush,
	.mask_memory = agp_generic_mask_memory,
	.masks = serverworks_masks,
	.agp_enable = serverworks_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = serverworks_create_gatt_table,
	.free_gatt_table = serverworks_free_gatt_table,
	.insert_memory = serverworks_insert_memory,
	.remove_memory = serverworks_remove_memory,
	.alloc_by_type = agp_generic_alloc_by_type,
	.free_by_type = agp_generic_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_destroy_page = agp_generic_destroy_page,
};
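
/*
 * Note (explanatory, based on the generic agpgart helpers): .mask_memory is
 * agp_generic_mask_memory, which ORs serverworks_masks[0].mask into each
 * page's bus address before serverworks_insert_memory() writes it into the
 * GATT.  With .mask = 1 that simply sets bit 0, the entry-valid bit - the
 * same bit the page directory entries get tagged with in
 * serverworks_create_gatt_table().  Purely as an example, a page at bus
 * address 0x12345000 would be entered in the GATT as 0x12345001.
 */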
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	/* Everything is on func 1 here so we are hardcoding function one */
	bridge_dev = pci_find_slot((unsigned int)pdev->bus->number,
				   PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		printk(KERN_INFO PFX "Detected a Serverworks chipset "
		       "but could not find the secondary device.\n");
		return -ENODEV;
	}

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		/* ServerWorks CNB20HE: no AGP on this chipset. */
		printk(KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			printk(KERN_ERR PFX "Unsupported Serverworks chipset "
			       "(device id: %04x)\n", pdev->device);
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit aperture address, "
			       "but top bits are not zero. Disabling agp\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				      serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
			       "but top bits are not zero. Disabling agp\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private;
	bridge->dev = pdev;

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}

static struct pci_device_id agp_serverworks_pci_table[] = {
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_SERVERWORKS,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);

static struct pci_driver agp_serverworks_pci_driver = {
	.name = "agpgart-serverworks",
	.id_table = agp_serverworks_pci_table,
	.probe = agp_serverworks_probe,
	.remove = agp_serverworks_remove,
};

static int __init agp_serverworks_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_serverworks_pci_driver);
}

static void __exit agp_serverworks_cleanup(void)
{
	pci_unregister_driver(&agp_serverworks_pci_driver);
}

module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");