/*
 * Serverworks AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include "agp.h"

#define SVWRKS_COMMAND          0x04
#define SVWRKS_APSIZE           0x10
#define SVWRKS_MMBASE           0x14
#define SVWRKS_CACHING          0x4b
#define SVWRKS_AGP_ENABLE       0x60
#define SVWRKS_FEATURE          0x68

#define SVWRKS_SIZE_MASK        0xfe000000

/* Memory mapped registers */
#define SVWRKS_GART_CACHE       0x02
#define SVWRKS_GATTBASE         0x04
#define SVWRKS_TLBFLUSH         0x10
#define SVWRKS_POSTFLUSH        0x14
#define SVWRKS_DIRFLUSH         0x0c


struct serverworks_page_map {
        unsigned long *real;
        unsigned long __iomem *remapped;
};

static struct _serverworks_private {
        struct pci_dev *svrwrks_dev;    /* device one */
        volatile u8 __iomem *registers;
        struct serverworks_page_map **gatt_pages;
        int num_tables;
        struct serverworks_page_map scratch_dir;

        int gart_addr_ofs;
        int mm_addr_ofs;
} serverworks_private;

static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
        int i;

        page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
        if (page_map->real == NULL) {
                return -ENOMEM;
        }
        SetPageReserved(virt_to_page(page_map->real));
        global_cache_flush();
        page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
                                             PAGE_SIZE);
        if (page_map->remapped == NULL) {
                ClearPageReserved(virt_to_page(page_map->real));
                free_page((unsigned long) page_map->real);
                page_map->real = NULL;
                return -ENOMEM;
        }
        global_cache_flush();

        for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
                writel(agp_bridge->scratch_page, page_map->remapped+i);

        return 0;
}

static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
        iounmap(page_map->remapped);
        ClearPageReserved(virt_to_page(page_map->real));
        free_page((unsigned long) page_map->real);
}

static void serverworks_free_gatt_pages(void)
{
        int i;
        struct serverworks_page_map **tables;
        struct serverworks_page_map *entry;

        tables = serverworks_private.gatt_pages;
        for (i = 0; i < serverworks_private.num_tables; i++) {
                entry = tables[i];
                if (entry != NULL) {
                        if (entry->real != NULL) {
                                serverworks_free_page_map(entry);
                        }
                        kfree(entry);
                }
        }
        kfree(tables);
}

static int serverworks_create_gatt_pages(int nr_tables)
{
        struct serverworks_page_map **tables;
        struct serverworks_page_map *entry;
        int retval = 0;
        int i;

        tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
                         GFP_KERNEL);
        if (tables == NULL) {
                return -ENOMEM;
        }
        memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1));
        for (i = 0; i < nr_tables; i++) {
                entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
                if (entry == NULL) {
                        retval = -ENOMEM;
                        break;
                }
                memset(entry, 0, sizeof(struct serverworks_page_map));
                tables[i] = entry;
                retval = serverworks_create_page_map(entry);
                if (retval != 0) break;
        }
        serverworks_private.num_tables = nr_tables;
        serverworks_private.gatt_pages = tables;

        if (retval != 0) serverworks_free_gatt_pages();

        return retval;
}

#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
        GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
        GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif
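
/*
 * The GATT used here is a two-level structure: a single page directory
 * whose entries point at individual 4 KB GATT pages.  For a given
 * aperture bus address, bits 31:22 (relative to the aperture base)
 * select the directory entry and bits 21:12 select the entry within
 * that GATT page, so each GATT page covers 4 MB of aperture space.
 * For example, an address 6 MB into the aperture resolves to directory
 * index 1 and GATT offset 512.
 */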
static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
        struct aper_size_info_lvl2 *value;
        struct serverworks_page_map page_dir;
        int retval;
        u32 temp;
        int i;

        value = A_SIZE_LVL2(agp_bridge->current_size);
        retval = serverworks_create_page_map(&page_dir);
        if (retval != 0) {
                return retval;
        }
        retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
        if (retval != 0) {
                serverworks_free_page_map(&page_dir);
                return retval;
        }
        /* Create a fake scratch directory */
        for (i = 0; i < 1024; i++) {
                writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
                writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
        }

        retval = serverworks_create_gatt_pages(value->num_entries / 1024);
        if (retval != 0) {
                serverworks_free_page_map(&page_dir);
                serverworks_free_page_map(&serverworks_private.scratch_dir);
                return retval;
        }

        agp_bridge->gatt_table_real = (u32 *)page_dir.real;
        agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
        agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);

        /* Get the address for the gart region.
         * This is a bus address even on the alpha, because it is
         * used to program the agp master, not the cpu.
         */

        pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

        /* Calculate the agp offset */

        for (i = 0; i < value->num_entries / 1024; i++)
                writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);

        return 0;
}

static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
        struct serverworks_page_map page_dir;

        page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
        page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

        serverworks_free_gatt_pages();
        serverworks_free_page_map(&page_dir);
        serverworks_free_page_map(&serverworks_private.scratch_dir);
        return 0;
}
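
/*
 * The usable aperture size is probed by writing SVWRKS_SIZE_MASK to the
 * aperture base register, reading back which of those bits stick, and
 * matching the result against the size_value field of the
 * serverworks_sizes table further down.  The original register value is
 * restored before returning.
 */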
static int serverworks_fetch_size(void)
{
        int i;
        u32 temp;
        u32 temp2;
        struct aper_size_info_lvl2 *values;

        values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
        pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
        pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs,
                               SVWRKS_SIZE_MASK);
        pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp2);
        pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, temp);
        temp2 &= SVWRKS_SIZE_MASK;

        for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
                if (temp2 == values[i].size_value) {
                        agp_bridge->previous_size =
                            agp_bridge->current_size = (void *) (values + i);

                        agp_bridge->aperture_size_idx = i;
                        return values[i].size;
                }
        }

        return 0;
}

/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However,
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can contain a large
 * number of entries.
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
        writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
        while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1)
                cpu_relax();

        writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
        while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1)
                cpu_relax();
}

static int serverworks_configure(void)
{
        struct aper_size_info_lvl2 *current_size;
        u32 temp;
        u8 enable_reg;
        u16 cap_reg;

        current_size = A_SIZE_LVL2(agp_bridge->current_size);

        /* Get the memory mapped registers */
        pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
        temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
        serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
        if (!serverworks_private.registers) {
                printk(KERN_ERR PFX "Unable to ioremap() memory.\n");
                return -ENOMEM;
        }

        writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
        readb(serverworks_private.registers+SVWRKS_GART_CACHE); /* PCI Posting. */

        writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
        readl(serverworks_private.registers+SVWRKS_GATTBASE); /* PCI Posting. */

        cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
        cap_reg &= ~0x0007;
        cap_reg |= 0x4;
        writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
        readw(serverworks_private.registers+SVWRKS_COMMAND);

        pci_read_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, &enable_reg);
        enable_reg |= 0x1; /* Agp Enable bit */
        pci_write_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, enable_reg);
        serverworks_tlbflush(NULL);

        agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

        /* Fill in the mode register */
        pci_read_config_dword(serverworks_private.svrwrks_dev,
                              agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

        pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
        enable_reg &= ~0x3;
        pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

        pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
        enable_reg |= (1<<6);
        pci_write_config_byte(agp_bridge->dev, SVWRKS_FEATURE, enable_reg);

        return 0;
}

static void serverworks_cleanup(void)
{
        iounmap((void __iomem *) serverworks_private.registers);
}
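
/*
 * Insert and remove walk the requested page range one entry at a time:
 * each aperture address is pushed through the page directory via
 * SVRWRKS_GET_GATT() to locate the right GATT page, the entry is
 * written (a masked physical address on insert, the scratch page on
 * remove), and the chipset TLB is flushed afterwards.
 */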
static int serverworks_insert_memory(struct agp_memory *mem,
                                     off_t pg_start, int type)
{
        int i, j, num_entries;
        unsigned long __iomem *cur_gatt;
        unsigned long addr;

        num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

        if (type != 0 || mem->type != 0) {
                return -EINVAL;
        }
        if ((pg_start + mem->page_count) > num_entries) {
                return -EINVAL;
        }

        j = pg_start;
        while (j < (pg_start + mem->page_count)) {
                addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = SVRWRKS_GET_GATT(addr);
                if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
                        return -EBUSY;
                j++;
        }

        if (mem->is_flushed == FALSE) {
                global_cache_flush();
                mem->is_flushed = TRUE;
        }

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = SVRWRKS_GET_GATT(addr);
                writel(agp_bridge->driver->mask_memory(agp_bridge,
                                mem->memory[i], mem->type),
                       cur_gatt+GET_GATT_OFF(addr));
        }
        serverworks_tlbflush(mem);
        return 0;
}

static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
                                     int type)
{
        int i;
        unsigned long __iomem *cur_gatt;
        unsigned long addr;

        if (type != 0 || mem->type != 0) {
                return -EINVAL;
        }

        global_cache_flush();
        serverworks_tlbflush(mem);

        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
                addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
                cur_gatt = SVRWRKS_GET_GATT(addr);
                writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
        }

        serverworks_tlbflush(mem);
        return 0;
}

static struct gatt_mask serverworks_masks[] =
{
        {.mask = 1, .type = 0}
};

static struct aper_size_info_lvl2 serverworks_sizes[7] =
{
        {2048, 524288, 0x80000000},
        {1024, 262144, 0xc0000000},
        {512, 131072, 0xe0000000},
        {256, 65536, 0xf0000000},
        {128, 32768, 0xf8000000},
        {64, 16384, 0xfc000000},
        {32, 8192, 0xfe000000}
};

static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
        u32 command;

        pci_read_config_dword(serverworks_private.svrwrks_dev,
                              bridge->capndx + PCI_AGP_STATUS,
                              &command);

        command = agp_collect_device_status(bridge, mode, command);

        command &= ~0x10;       /* disable FW */
        command &= ~0x08;

        command |= 0x100;

        pci_write_config_dword(serverworks_private.svrwrks_dev,
                               bridge->capndx + PCI_AGP_COMMAND,
                               command);

        agp_device_command(command, 0);
}

static struct agp_bridge_driver sworks_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = serverworks_sizes,
        .size_type              = LVL2_APER_SIZE,
        .num_aperture_sizes     = 7,
        .configure              = serverworks_configure,
        .fetch_size             = serverworks_fetch_size,
        .cleanup                = serverworks_cleanup,
        .tlb_flush              = serverworks_tlbflush,
        .mask_memory            = agp_generic_mask_memory,
        .masks                  = serverworks_masks,
        .agp_enable             = serverworks_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = serverworks_create_gatt_table,
        .free_gatt_table        = serverworks_free_gatt_table,
        .insert_memory          = serverworks_insert_memory,
        .remove_memory          = serverworks_remove_memory,
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_destroy_page       = agp_generic_destroy_page,
};
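
/*
 * On these chipsets the AGP control logic sits on function 1 of the
 * host bridge, so the probe routine looks up that secondary device
 * explicitly and programs it alongside the function 0 device that
 * matched the PCI ID table.
 */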
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
                                           const struct pci_device_id *ent)
{
        struct agp_bridge_data *bridge;
        struct pci_dev *bridge_dev;
        u32 temp, temp2;
        u8 cap_ptr = 0;

        /* Everything is on func 1 here so we are hardcoding function one */
        bridge_dev = pci_find_slot((unsigned int)pdev->bus->number,
                                   PCI_DEVFN(0, 1));
        if (!bridge_dev) {
                printk(KERN_INFO PFX "Detected a Serverworks chipset "
                       "but could not find the secondary device.\n");
                return -ENODEV;
        }

        cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

        switch (pdev->device) {
        case 0x0006:
                /* ServerWorks CNB20HE
                   Fail silently.*/
                printk(KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
                return -ENODEV;

        case PCI_DEVICE_ID_SERVERWORKS_HE:
        case PCI_DEVICE_ID_SERVERWORKS_LE:
        case 0x0007:
                break;

        default:
                if (cap_ptr)
                        printk(KERN_ERR PFX "Unsupported Serverworks chipset "
                               "(device id: %04x)\n", pdev->device);
                return -ENODEV;
        }

        serverworks_private.svrwrks_dev = bridge_dev;
        serverworks_private.gart_addr_ofs = 0x10;

        pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
        if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
                pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
                if (temp2 != 0) {
                        printk(KERN_INFO PFX "Detected 64 bit aperture address, "
                               "but top bits are not zero. Disabling agp\n");
                        return -ENODEV;
                }
                serverworks_private.mm_addr_ofs = 0x18;
        } else
                serverworks_private.mm_addr_ofs = 0x14;

        pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
        if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
                pci_read_config_dword(pdev,
                                      serverworks_private.mm_addr_ofs + 4, &temp2);
                if (temp2 != 0) {
                        printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
                               "but top bits are not zero. Disabling agp\n");
                        return -ENODEV;
                }
        }

        bridge = agp_alloc_bridge();
        if (!bridge)
                return -ENOMEM;

        bridge->driver = &sworks_driver;
        bridge->dev_private_data = &serverworks_private;
        bridge->dev = pdev;

        pci_set_drvdata(pdev, bridge);
        return agp_add_bridge(bridge);
}

static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
{
        struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

        agp_remove_bridge(bridge);
        agp_put_bridge(bridge);
}

static struct pci_device_id agp_serverworks_pci_table[] = {
        {
        .class          = (PCI_CLASS_BRIDGE_HOST << 8),
        .class_mask     = ~0,
        .vendor         = PCI_VENDOR_ID_SERVERWORKS,
        .device         = PCI_ANY_ID,
        .subvendor      = PCI_ANY_ID,
        .subdevice      = PCI_ANY_ID,
        },
        { }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);

static struct pci_driver agp_serverworks_pci_driver = {
        .name           = "agpgart-serverworks",
        .id_table       = agp_serverworks_pci_table,
        .probe          = agp_serverworks_probe,
        .remove         = agp_serverworks_remove,
};

static int __init agp_serverworks_init(void)
{
        if (agp_off)
                return -EINVAL;
        return pci_register_driver(&agp_serverworks_pci_driver);
}

static void __exit agp_serverworks_cleanup(void)
{
        pci_unregister_driver(&agp_serverworks_pci_driver);
}

module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");