#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

struct pci_root_info {
        struct acpi_device *bridge;
        char *name;
        unsigned int res_num;
        struct resource *res;
        struct pci_bus *bus;
        int busnum;
};

static bool pci_use_crs = true;

static int __init set_use_crs(const struct dmi_system_id *id)
{
        pci_use_crs = true;
        return 0;
}

static const struct dmi_system_id pci_use_crs_table[] __initconst = {
        /* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
        {
                .callback = set_use_crs,
                .ident = "IBM System x3800",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
                },
        },
        {}
};

void __init pci_acpi_crs_quirks(void)
{
        int year;

        if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
                pci_use_crs = false;

        dmi_check_system(pci_use_crs_table);

        /*
         * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
         * takes precedence over anything we figured out above.
         */
        if (pci_probe & PCI_ROOT_NO_CRS)
                pci_use_crs = false;
        else if (pci_probe & PCI_USE__CRS)
                pci_use_crs = true;

        printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
               "if necessary, use \"pci=%s\" and report a bug\n",
               pci_use_crs ? "Using" : "Ignoring",
               pci_use_crs ? "nocrs" : "use_crs");
}

static acpi_status
resource_to_addr(struct acpi_resource *resource,
                 struct acpi_resource_address64 *addr)
{
        acpi_status status;
        struct acpi_resource_memory24 *memory24;
        struct acpi_resource_memory32 *memory32;
        struct acpi_resource_fixed_memory32 *fixed_memory32;

        memset(addr, 0, sizeof(*addr));
        switch (resource->type) {
        case ACPI_RESOURCE_TYPE_MEMORY24:
                memory24 = &resource->data.memory24;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = memory24->minimum;
                addr->address_length = memory24->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_MEMORY32:
                memory32 = &resource->data.memory32;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = memory32->minimum;
                addr->address_length = memory32->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                fixed_memory32 = &resource->data.fixed_memory32;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = fixed_memory32->address;
                addr->address_length = fixed_memory32->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_ADDRESS16:
        case ACPI_RESOURCE_TYPE_ADDRESS32:
        case ACPI_RESOURCE_TYPE_ADDRESS64:
                status = acpi_resource_to_address64(resource, addr);
                if (ACPI_SUCCESS(status) &&
                    (addr->resource_type == ACPI_MEMORY_RANGE ||
                    addr->resource_type == ACPI_IO_RANGE) &&
                    addr->address_length > 0) {
                        return AE_OK;
                }
                break;
        }
        return AE_ERROR;
}

static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
        struct pci_root_info *info = data;
        struct acpi_resource_address64 addr;
        acpi_status status;

        status = resource_to_addr(acpi_res, &addr);
        if (ACPI_SUCCESS(status))
                info->res_num++;
        return AE_OK;
}
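/*
 * setup_resource() converts one _CRS descriptor into a host bridge window.
 * Descriptors that resource_to_addr() cannot translate are skipped.  Memory
 * and I/O ranges are filled into info->res[]; when _CRS windows are being
 * used, each window is inserted into the iomem_resource/ioport_resource tree
 * and, if it does not conflict with an existing resource, attached to the
 * root bus via pci_bus_add_resource().
 */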
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
        struct pci_root_info *info = data;
        struct resource *res;
        struct acpi_resource_address64 addr;
        acpi_status status;
        unsigned long flags;
        struct resource *root, *conflict;
        u64 start, end;

        status = resource_to_addr(acpi_res, &addr);
        if (!ACPI_SUCCESS(status))
                return AE_OK;

        if (addr.resource_type == ACPI_MEMORY_RANGE) {
                root = &iomem_resource;
                flags = IORESOURCE_MEM;
                if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
                        flags |= IORESOURCE_PREFETCH;
        } else if (addr.resource_type == ACPI_IO_RANGE) {
                root = &ioport_resource;
                flags = IORESOURCE_IO;
        } else
                return AE_OK;

        start = addr.minimum + addr.translation_offset;
        end = addr.maximum + addr.translation_offset;

        res = &info->res[info->res_num];
        res->name = info->name;
        res->flags = flags;
        res->start = start;
        res->end = end;
        res->child = NULL;

        if (!pci_use_crs) {
                dev_printk(KERN_DEBUG, &info->bridge->dev,
                           "host bridge window %pR (ignored)\n", res);
                return AE_OK;
        }

        conflict = insert_resource_conflict(root, res);
        if (conflict) {
                dev_err(&info->bridge->dev,
                        "address space collision: host bridge window %pR "
                        "conflicts with %s %pR\n",
                        res, conflict->name, conflict);
        } else {
                pci_bus_add_resource(info->bus, res, 0);
                info->res_num++;
                if (addr.translation_offset)
                        dev_info(&info->bridge->dev, "host bridge window %pR "
                                 "(PCI address [%#llx-%#llx])\n",
                                 res, res->start - addr.translation_offset,
                                 res->end - addr.translation_offset);
                else
                        dev_info(&info->bridge->dev,
                                 "host bridge window %pR\n", res);
        }
        return AE_OK;
}

static void
get_current_resources(struct acpi_device *device, int busnum,
                      int domain, struct pci_bus *bus)
{
        struct pci_root_info info;
        size_t size;

        if (pci_use_crs)
                pci_bus_remove_resources(bus);

        info.bridge = device;
        info.bus = bus;
        info.res_num = 0;
        acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
                            &info);
        if (!info.res_num)
                return;

        size = sizeof(*info.res) * info.res_num;
        info.res = kmalloc(size, GFP_KERNEL);
        if (!info.res)
                goto res_alloc_fail;

        info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
        if (!info.name)
                goto name_alloc_fail;

        info.res_num = 0;
        acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
                            &info);

        return;

name_alloc_fail:
        kfree(info.res);
res_alloc_fail:
        return;
}
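/*
 * pci_acpi_scan_root() scans the PCI root bus described by an ACPI host
 * bridge.  It rejects non-zero segments when PCI domains are not supported,
 * derives a NUMA node from the bridge's _PXM proximity domain when
 * CONFIG_ACPI_NUMA is enabled, reuses the bus if it has already been
 * scanned, and otherwise creates it, fills in its host bridge windows from
 * _CRS, and scans the devices behind it.
 */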
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
        struct acpi_device *device = root->device;
        int domain = root->segment;
        int busnum = root->secondary.start;
        struct pci_bus *bus;
        struct pci_sysdata *sd;
        int node;
#ifdef CONFIG_ACPI_NUMA
        int pxm;
#endif

        if (domain && !pci_domains_supported) {
                printk(KERN_WARNING "pci_bus %04x:%02x: "
                       "ignored (multiple domains not supported)\n",
                       domain, busnum);
                return NULL;
        }

        node = -1;
#ifdef CONFIG_ACPI_NUMA
        pxm = acpi_get_pxm(device->handle);
        if (pxm >= 0)
                node = pxm_to_node(pxm);
        if (node != -1)
                set_mp_bus_to_node(busnum, node);
        else
#endif
                node = get_mp_bus_to_node(busnum);

        if (node != -1 && !node_online(node))
                node = -1;

        /* Allocate per-root-bus (not per bus) arch-specific data.
         * TODO: leak; this memory is never freed.
         * It's arguable whether it's worth the trouble to care.
         */
        sd = kzalloc(sizeof(*sd), GFP_KERNEL);
        if (!sd) {
                printk(KERN_WARNING "pci_bus %04x:%02x: "
                       "ignored (out of memory)\n", domain, busnum);
                return NULL;
        }

        sd->domain = domain;
        sd->node = node;
        /*
         * The desired PCI bus may already have been scanned; in that case
         * there is no need to scan the bus with the given domain/busnum
         * again.
         */
        bus = pci_find_bus(domain, busnum);
        if (bus) {
                /*
                 * If the desired bus exists, the content of bus->sysdata
                 * will be replaced by sd.
                 */
                memcpy(bus->sysdata, sd, sizeof(*sd));
                kfree(sd);
        } else {
                bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
                if (bus) {
                        get_current_resources(device, busnum, domain, bus);
                        bus->subordinate = pci_scan_child_bus(bus);
                }
        }

        if (!bus)
                kfree(sd);

        if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
                if (pxm >= 0)
                        dev_printk(KERN_DEBUG, &bus->dev,
                                   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
                dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
        }

        return bus;
}

int __init pci_acpi_init(void)
{
        struct pci_dev *dev = NULL;

        if (acpi_noirq)
                return -ENODEV;

        printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
        acpi_irq_penalty_init();
        pcibios_enable_irq = acpi_pci_irq_enable;
        pcibios_disable_irq = acpi_pci_irq_disable;
        x86_init.pci.init_irq = x86_init_noop;

        if (pci_routeirq) {
                /*
                 * PCI IRQ routing is set up by pci_enable_device(), but we
                 * also do it here in case there are still broken drivers that
                 * don't use pci_enable_device().
                 */
                printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
                for_each_pci_dev(dev)
                        acpi_pci_irq_enable(dev);
        }

        return 0;
}
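/*
 * Summary of the boot parameters honored above: "pci=use_crs" and "pci=nocrs"
 * (via pci_probe) override the BIOS-date and DMI defaults for trusting _CRS
 * host bridge windows, and "pci=routeirq" (via pci_routeirq) makes
 * pci_acpi_init() route interrupts for every device up front rather than
 * waiting for pci_enable_device().
 */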