1 /* 2 * Driver for OHCI 1394 controllers 3 * 4 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software Foundation, 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 */ 20 21 #include <linux/compiler.h> 22 #include <linux/delay.h> 23 #include <linux/device.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/firewire.h> 26 #include <linux/firewire-constants.h> 27 #include <linux/gfp.h> 28 #include <linux/init.h> 29 #include <linux/interrupt.h> 30 #include <linux/io.h> 31 #include <linux/kernel.h> 32 #include <linux/list.h> 33 #include <linux/mm.h> 34 #include <linux/module.h> 35 #include <linux/moduleparam.h> 36 #include <linux/pci.h> 37 #include <linux/pci_ids.h> 38 #include <linux/spinlock.h> 39 #include <linux/string.h> 40 41 #include <asm/byteorder.h> 42 #include <asm/page.h> 43 #include <asm/system.h> 44 45 #ifdef CONFIG_PPC_PMAC 46 #include <asm/pmac_feature.h> 47 #endif 48 49 #include "core.h" 50 #include "ohci.h" 51 52 #define DESCRIPTOR_OUTPUT_MORE 0 53 #define DESCRIPTOR_OUTPUT_LAST (1 << 12) 54 #define DESCRIPTOR_INPUT_MORE (2 << 12) 55 #define DESCRIPTOR_INPUT_LAST (3 << 12) 56 #define DESCRIPTOR_STATUS (1 << 11) 57 #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8) 58 #define DESCRIPTOR_PING (1 << 7) 59 #define DESCRIPTOR_YY (1 << 6) 60 #define DESCRIPTOR_NO_IRQ (0 << 4) 61 #define DESCRIPTOR_IRQ_ERROR (1 << 4) 62 #define DESCRIPTOR_IRQ_ALWAYS (3 << 4) 63 #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2) 64 #define DESCRIPTOR_WAIT (3 << 0) 65 66 struct descriptor { 67 __le16 req_count; 68 __le16 control; 69 __le32 data_address; 70 __le32 branch_address; 71 __le16 res_count; 72 __le16 transfer_status; 73 } __attribute__((aligned(16))); 74 75 #define CONTROL_SET(regs) (regs) 76 #define CONTROL_CLEAR(regs) ((regs) + 4) 77 #define COMMAND_PTR(regs) ((regs) + 12) 78 #define CONTEXT_MATCH(regs) ((regs) + 16) 79 80 struct ar_buffer { 81 struct descriptor descriptor; 82 struct ar_buffer *next; 83 __le32 data[0]; 84 }; 85 86 struct ar_context { 87 struct fw_ohci *ohci; 88 struct ar_buffer *current_buffer; 89 struct ar_buffer *last_buffer; 90 void *pointer; 91 u32 regs; 92 struct tasklet_struct tasklet; 93 }; 94 95 struct context; 96 97 typedef int (*descriptor_callback_t)(struct context *ctx, 98 struct descriptor *d, 99 struct descriptor *last); 100 101 /* 102 * A buffer that contains a block of DMA-able coherent memory used for 103 * storing a portion of a DMA descriptor program. 104 */ 105 struct descriptor_buffer { 106 struct list_head list; 107 dma_addr_t buffer_bus; 108 size_t buffer_size; 109 size_t used; 110 struct descriptor buffer[0]; 111 }; 112 113 struct context { 114 struct fw_ohci *ohci; 115 u32 regs; 116 int total_allocation; 117 118 /* 119 * List of page-sized buffers for storing DMA descriptors. 120 * Head of list contains buffers in use and tail of list contains 121 * free buffers. 122 */ 123 struct list_head buffer_list; 124 125 /* 126 * Pointer to a buffer inside buffer_list that contains the tail 127 * end of the current DMA program. 128 */ 129 struct descriptor_buffer *buffer_tail; 130 131 /* 132 * The descriptor containing the branch address of the first 133 * descriptor that has not yet been filled by the device. 134 */ 135 struct descriptor *last; 136 137 /* 138 * The last descriptor in the DMA program. It contains the branch 139 * address that must be updated upon appending a new descriptor. 140 */ 141 struct descriptor *prev; 142 143 descriptor_callback_t callback; 144 145 struct tasklet_struct tasklet; 146 }; 147 148 #define IT_HEADER_SY(v) ((v) << 0) 149 #define IT_HEADER_TCODE(v) ((v) << 4) 150 #define IT_HEADER_CHANNEL(v) ((v) << 8) 151 #define IT_HEADER_TAG(v) ((v) << 14) 152 #define IT_HEADER_SPEED(v) ((v) << 16) 153 #define IT_HEADER_DATA_LENGTH(v) ((v) << 16) 154 155 struct iso_context { 156 struct fw_iso_context base; 157 struct context context; 158 int excess_bytes; 159 void *header; 160 size_t header_length; 161 }; 162 163 #define CONFIG_ROM_SIZE 1024 164 165 struct fw_ohci { 166 struct fw_card card; 167 168 __iomem char *registers; 169 int node_id; 170 int generation; 171 int request_generation; /* for timestamping incoming requests */ 172 unsigned quirks; 173 174 /* 175 * Spinlock for accessing fw_ohci data. Never call out of 176 * this driver with this lock held. 177 */ 178 spinlock_t lock; 179 180 struct ar_context ar_request_ctx; 181 struct ar_context ar_response_ctx; 182 struct context at_request_ctx; 183 struct context at_response_ctx; 184 185 u32 it_context_mask; 186 struct iso_context *it_context_list; 187 u64 ir_context_channels; 188 u32 ir_context_mask; 189 struct iso_context *ir_context_list; 190 191 __be32 *config_rom; 192 dma_addr_t config_rom_bus; 193 __be32 *next_config_rom; 194 dma_addr_t next_config_rom_bus; 195 __be32 next_header; 196 197 __le32 *self_id_cpu; 198 dma_addr_t self_id_bus; 199 struct tasklet_struct bus_reset_tasklet; 200 201 u32 self_id_buffer[512]; 202 }; 203 204 static inline struct fw_ohci *fw_ohci(struct fw_card *card) 205 { 206 return container_of(card, struct fw_ohci, card); 207 } 208 209 #define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000 210 #define IR_CONTEXT_BUFFER_FILL 0x80000000 211 #define IR_CONTEXT_ISOCH_HEADER 0x40000000 212 #define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000 213 #define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000 214 #define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000 215 216 #define CONTEXT_RUN 0x8000 217 #define CONTEXT_WAKE 0x1000 218 #define CONTEXT_DEAD 0x0800 219 #define CONTEXT_ACTIVE 0x0400 220 221 #define OHCI1394_MAX_AT_REQ_RETRIES 0xf 222 #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 223 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 224 225 #define OHCI1394_REGISTER_SIZE 0x800 226 #define OHCI_LOOP_COUNT 500 227 #define OHCI1394_PCI_HCI_Control 0x40 228 #define SELF_ID_BUF_SIZE 0x800 229 #define OHCI_TCODE_PHY_PACKET 0x0e 230 #define OHCI_VERSION_1_1 0x010010 231 232 static char ohci_driver_name[] = KBUILD_MODNAME; 233 234 #define QUIRK_CYCLE_TIMER 1 235 #define QUIRK_RESET_PACKET 2 236 #define QUIRK_BE_HEADERS 4 237 238 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ 239 static const struct { 240 unsigned short vendor, device, flags; 241 } ohci_quirks[] = { 242 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, 243 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 244 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 245 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 246 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, 247 }; 248 249 /* This overrides anything that was found in ohci_quirks[]. */ 250 static int param_quirks; 251 module_param_named(quirks, param_quirks, int, 0644); 252 MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" 253 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) 254 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) 255 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) 256 ")"); 257 258 #ifdef CONFIG_FIREWIRE_OHCI_DEBUG 259 260 #define OHCI_PARAM_DEBUG_AT_AR 1 261 #define OHCI_PARAM_DEBUG_SELFIDS 2 262 #define OHCI_PARAM_DEBUG_IRQS 4 263 #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ 264 265 static int param_debug; 266 module_param_named(debug, param_debug, int, 0644); 267 MODULE_PARM_DESC(debug, "Verbose logging (default = 0" 268 ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR) 269 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS) 270 ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS) 271 ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) 272 ", or a combination, or all = -1)"); 273 274 static void log_irqs(u32 evt) 275 { 276 if (likely(!(param_debug & 277 (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) 278 return; 279 280 if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) && 281 !(evt & OHCI1394_busReset)) 282 return; 283 284 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, 285 evt & OHCI1394_selfIDComplete ? " selfID" : "", 286 evt & OHCI1394_RQPkt ? " AR_req" : "", 287 evt & OHCI1394_RSPkt ? " AR_resp" : "", 288 evt & OHCI1394_reqTxComplete ? " AT_req" : "", 289 evt & OHCI1394_respTxComplete ? " AT_resp" : "", 290 evt & OHCI1394_isochRx ? " IR" : "", 291 evt & OHCI1394_isochTx ? " IT" : "", 292 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 293 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 294 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", 295 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 296 evt & OHCI1394_busReset ? " busReset" : "", 297 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | 298 OHCI1394_RSPkt | OHCI1394_reqTxComplete | 299 OHCI1394_respTxComplete | OHCI1394_isochRx | 300 OHCI1394_isochTx | OHCI1394_postedWriteErr | 301 OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent | 302 OHCI1394_regAccessFail | OHCI1394_busReset) 303 ? " ?" : ""); 304 } 305 306 static const char *speed[] = { 307 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta", 308 }; 309 static const char *power[] = { 310 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W", 311 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", 312 }; 313 static const char port[] = { '.', '-', 'p', 'c', }; 314 315 static char _p(u32 *s, int shift) 316 { 317 return port[*s >> shift & 3]; 318 } 319 320 static void log_selfids(int node_id, int generation, int self_id_count, u32 *s) 321 { 322 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) 323 return; 324 325 fw_notify("%d selfIDs, generation %d, local node ID %04x\n", 326 self_id_count, generation, node_id); 327 328 for (; self_id_count--; ++s) 329 if ((*s & 1 << 23) == 0) 330 fw_notify("selfID 0: %08x, phy %d [%c%c%c] " 331 "%s gc=%d %s %s%s%s\n", 332 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), 333 speed[*s >> 14 & 3], *s >> 16 & 63, 334 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", 335 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); 336 else 337 fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", 338 *s, *s >> 24 & 63, 339 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), 340 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); 341 } 342 343 static const char *evts[] = { 344 [0x00] = "evt_no_status", [0x01] = "-reserved-", 345 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", 346 [0x04] = "evt_underrun", [0x05] = "evt_overrun", 347 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", 348 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", 349 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", 350 [0x0c] = "-reserved-", [0x0d] = "-reserved-", 351 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", 352 [0x10] = "-reserved-", [0x11] = "ack_complete", 353 [0x12] = "ack_pending ", [0x13] = "-reserved-", 354 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", 355 [0x16] = "ack_busy_B", [0x17] = "-reserved-", 356 [0x18] = "-reserved-", [0x19] = "-reserved-", 357 [0x1a] = "-reserved-", [0x1b] = "ack_tardy", 358 [0x1c] = "-reserved-", [0x1d] = "ack_data_error", 359 [0x1e] = "ack_type_error", [0x1f] = "-reserved-", 360 [0x20] = "pending/cancelled", 361 }; 362 static const char *tcodes[] = { 363 [0x0] = "QW req", [0x1] = "BW req", 364 [0x2] = "W resp", [0x3] = "-reserved-", 365 [0x4] = "QR req", [0x5] = "BR req", 366 [0x6] = "QR resp", [0x7] = "BR resp", 367 [0x8] = "cycle start", [0x9] = "Lk req", 368 [0xa] = "async stream packet", [0xb] = "Lk resp", 369 [0xc] = "-reserved-", [0xd] = "-reserved-", 370 [0xe] = "link internal", [0xf] = "-reserved-", 371 }; 372 static const char *phys[] = { 373 [0x0] = "phy config packet", [0x1] = "link-on packet", 374 [0x2] = "self-id packet", [0x3] = "-reserved-", 375 }; 376 377 static void log_ar_at_event(char dir, int speed, u32 *header, int evt) 378 { 379 int tcode = header[0] >> 4 & 0xf; 380 char specific[12]; 381 382 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) 383 return; 384 385 if (unlikely(evt >= ARRAY_SIZE(evts))) 386 evt = 0x1f; 387 388 if (evt == OHCI1394_evt_bus_reset) { 389 fw_notify("A%c evt_bus_reset, generation %d\n", 390 dir, (header[2] >> 16) & 0xff); 391 return; 392 } 393 394 if (header[0] == ~header[1]) { 395 fw_notify("A%c %s, %s, %08x\n", 396 dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]); 397 return; 398 } 399 400 switch (tcode) { 401 case 0x0: case 0x6: case 0x8: 402 snprintf(specific, sizeof(specific), " = %08x", 403 be32_to_cpu((__force __be32)header[3])); 404 break; 405 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: 406 snprintf(specific, sizeof(specific), " %x,%x", 407 header[3] >> 16, header[3] & 0xffff); 408 break; 409 default: 410 specific[0] = '\0'; 411 } 412 413 switch (tcode) { 414 case 0xe: case 0xa: 415 fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); 416 break; 417 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: 418 fw_notify("A%c spd %x tl %02x, " 419 "%04x -> %04x, %s, " 420 "%s, %04x%08x%s\n", 421 dir, speed, header[0] >> 10 & 0x3f, 422 header[1] >> 16, header[0] >> 16, evts[evt], 423 tcodes[tcode], header[1] & 0xffff, header[2], specific); 424 break; 425 default: 426 fw_notify("A%c spd %x tl %02x, " 427 "%04x -> %04x, %s, " 428 "%s%s\n", 429 dir, speed, header[0] >> 10 & 0x3f, 430 header[1] >> 16, header[0] >> 16, evts[evt], 431 tcodes[tcode], specific); 432 } 433 } 434 435 #else 436 437 #define log_irqs(evt) 438 #define log_selfids(node_id, generation, self_id_count, sid) 439 #define log_ar_at_event(dir, speed, header, evt) 440 441 #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */ 442 443 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) 444 { 445 writel(data, ohci->registers + offset); 446 } 447 448 static inline u32 reg_read(const struct fw_ohci *ohci, int offset) 449 { 450 return readl(ohci->registers + offset); 451 } 452 453 static inline void flush_writes(const struct fw_ohci *ohci) 454 { 455 /* Do a dummy read to flush writes. */ 456 reg_read(ohci, OHCI1394_Version); 457 } 458 459 static int ohci_update_phy_reg(struct fw_card *card, int addr, 460 int clear_bits, int set_bits) 461 { 462 struct fw_ohci *ohci = fw_ohci(card); 463 u32 val, old; 464 465 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); 466 flush_writes(ohci); 467 msleep(2); 468 val = reg_read(ohci, OHCI1394_PhyControl); 469 if ((val & OHCI1394_PhyControl_ReadDone) == 0) { 470 fw_error("failed to set phy reg bits.\n"); 471 return -EBUSY; 472 } 473 474 old = OHCI1394_PhyControl_ReadData(val); 475 old = (old & ~clear_bits) | set_bits; 476 reg_write(ohci, OHCI1394_PhyControl, 477 OHCI1394_PhyControl_Write(addr, old)); 478 479 return 0; 480 } 481 482 static int ar_context_add_page(struct ar_context *ctx) 483 { 484 struct device *dev = ctx->ohci->card.device; 485 struct ar_buffer *ab; 486 dma_addr_t uninitialized_var(ab_bus); 487 size_t offset; 488 489 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC); 490 if (ab == NULL) 491 return -ENOMEM; 492 493 ab->next = NULL; 494 memset(&ab->descriptor, 0, sizeof(ab->descriptor)); 495 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 496 DESCRIPTOR_STATUS | 497 DESCRIPTOR_BRANCH_ALWAYS); 498 offset = offsetof(struct ar_buffer, data); 499 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset); 500 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset); 501 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); 502 ab->descriptor.branch_address = 0; 503 504 ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); 505 ctx->last_buffer->next = ab; 506 ctx->last_buffer = ab; 507 508 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 509 flush_writes(ctx->ohci); 510 511 return 0; 512 } 513 514 static void ar_context_release(struct ar_context *ctx) 515 { 516 struct ar_buffer *ab, *ab_next; 517 size_t offset; 518 dma_addr_t ab_bus; 519 520 for (ab = ctx->current_buffer; ab; ab = ab_next) { 521 ab_next = ab->next; 522 offset = offsetof(struct ar_buffer, data); 523 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset; 524 dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE, 525 ab, ab_bus); 526 } 527 } 528 529 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) 530 #define cond_le32_to_cpu(v) \ 531 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v)) 532 #else 533 #define cond_le32_to_cpu(v) le32_to_cpu(v) 534 #endif 535 536 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) 537 { 538 struct fw_ohci *ohci = ctx->ohci; 539 struct fw_packet p; 540 u32 status, length, tcode; 541 int evt; 542 543 p.header[0] = cond_le32_to_cpu(buffer[0]); 544 p.header[1] = cond_le32_to_cpu(buffer[1]); 545 p.header[2] = cond_le32_to_cpu(buffer[2]); 546 547 tcode = (p.header[0] >> 4) & 0x0f; 548 switch (tcode) { 549 case TCODE_WRITE_QUADLET_REQUEST: 550 case TCODE_READ_QUADLET_RESPONSE: 551 p.header[3] = (__force __u32) buffer[3]; 552 p.header_length = 16; 553 p.payload_length = 0; 554 break; 555 556 case TCODE_READ_BLOCK_REQUEST : 557 p.header[3] = cond_le32_to_cpu(buffer[3]); 558 p.header_length = 16; 559 p.payload_length = 0; 560 break; 561 562 case TCODE_WRITE_BLOCK_REQUEST: 563 case TCODE_READ_BLOCK_RESPONSE: 564 case TCODE_LOCK_REQUEST: 565 case TCODE_LOCK_RESPONSE: 566 p.header[3] = cond_le32_to_cpu(buffer[3]); 567 p.header_length = 16; 568 p.payload_length = p.header[3] >> 16; 569 break; 570 571 case TCODE_WRITE_RESPONSE: 572 case TCODE_READ_QUADLET_REQUEST: 573 case OHCI_TCODE_PHY_PACKET: 574 p.header_length = 12; 575 p.payload_length = 0; 576 break; 577 578 default: 579 /* FIXME: Stop context, discard everything, and restart? */ 580 p.header_length = 0; 581 p.payload_length = 0; 582 } 583 584 p.payload = (void *) buffer + p.header_length; 585 586 /* FIXME: What to do about evt_* errors? */ 587 length = (p.header_length + p.payload_length + 3) / 4; 588 status = cond_le32_to_cpu(buffer[length]); 589 evt = (status >> 16) & 0x1f; 590 591 p.ack = evt - 16; 592 p.speed = (status >> 21) & 0x7; 593 p.timestamp = status & 0xffff; 594 p.generation = ohci->request_generation; 595 596 log_ar_at_event('R', p.speed, p.header, evt); 597 598 /* 599 * The OHCI bus reset handler synthesizes a phy packet with 600 * the new generation number when a bus reset happens (see 601 * section 8.4.2.3). This helps us determine when a request 602 * was received and make sure we send the response in the same 603 * generation. We only need this for requests; for responses 604 * we use the unique tlabel for finding the matching 605 * request. 606 * 607 * Alas some chips sometimes emit bus reset packets with a 608 * wrong generation. We set the correct generation for these 609 * at a slightly incorrect time (in bus_reset_tasklet). 610 */ 611 if (evt == OHCI1394_evt_bus_reset) { 612 if (!(ohci->quirks & QUIRK_RESET_PACKET)) 613 ohci->request_generation = (p.header[2] >> 16) & 0xff; 614 } else if (ctx == &ohci->ar_request_ctx) { 615 fw_core_handle_request(&ohci->card, &p); 616 } else { 617 fw_core_handle_response(&ohci->card, &p); 618 } 619 620 return buffer + length + 1; 621 } 622 623 static void ar_context_tasklet(unsigned long data) 624 { 625 struct ar_context *ctx = (struct ar_context *)data; 626 struct fw_ohci *ohci = ctx->ohci; 627 struct ar_buffer *ab; 628 struct descriptor *d; 629 void *buffer, *end; 630 631 ab = ctx->current_buffer; 632 d = &ab->descriptor; 633 634 if (d->res_count == 0) { 635 size_t size, rest, offset; 636 dma_addr_t start_bus; 637 void *start; 638 639 /* 640 * This descriptor is finished and we may have a 641 * packet split across this and the next buffer. We 642 * reuse the page for reassembling the split packet. 643 */ 644 645 offset = offsetof(struct ar_buffer, data); 646 start = buffer = ab; 647 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset; 648 649 ab = ab->next; 650 d = &ab->descriptor; 651 size = buffer + PAGE_SIZE - ctx->pointer; 652 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count); 653 memmove(buffer, ctx->pointer, size); 654 memcpy(buffer + size, ab->data, rest); 655 ctx->current_buffer = ab; 656 ctx->pointer = (void *) ab->data + rest; 657 end = buffer + size + rest; 658 659 while (buffer < end) 660 buffer = handle_ar_packet(ctx, buffer); 661 662 dma_free_coherent(ohci->card.device, PAGE_SIZE, 663 start, start_bus); 664 ar_context_add_page(ctx); 665 } else { 666 buffer = ctx->pointer; 667 ctx->pointer = end = 668 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count); 669 670 while (buffer < end) 671 buffer = handle_ar_packet(ctx, buffer); 672 } 673 } 674 675 static int ar_context_init(struct ar_context *ctx, 676 struct fw_ohci *ohci, u32 regs) 677 { 678 struct ar_buffer ab; 679 680 ctx->regs = regs; 681 ctx->ohci = ohci; 682 ctx->last_buffer = &ab; 683 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); 684 685 ar_context_add_page(ctx); 686 ar_context_add_page(ctx); 687 ctx->current_buffer = ab.next; 688 ctx->pointer = ctx->current_buffer->data; 689 690 return 0; 691 } 692 693 static void ar_context_run(struct ar_context *ctx) 694 { 695 struct ar_buffer *ab = ctx->current_buffer; 696 dma_addr_t ab_bus; 697 size_t offset; 698 699 offset = offsetof(struct ar_buffer, data); 700 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset; 701 702 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1); 703 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); 704 flush_writes(ctx->ohci); 705 } 706 707 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) 708 { 709 int b, key; 710 711 b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2; 712 key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8; 713 714 /* figure out which descriptor the branch address goes in */ 715 if (z == 2 && (b == 3 || key == 2)) 716 return d; 717 else 718 return d + z - 1; 719 } 720 721 static void context_tasklet(unsigned long data) 722 { 723 struct context *ctx = (struct context *) data; 724 struct descriptor *d, *last; 725 u32 address; 726 int z; 727 struct descriptor_buffer *desc; 728 729 desc = list_entry(ctx->buffer_list.next, 730 struct descriptor_buffer, list); 731 last = ctx->last; 732 while (last->branch_address != 0) { 733 struct descriptor_buffer *old_desc = desc; 734 address = le32_to_cpu(last->branch_address); 735 z = address & 0xf; 736 address &= ~0xf; 737 738 /* If the branch address points to a buffer outside of the 739 * current buffer, advance to the next buffer. */ 740 if (address < desc->buffer_bus || 741 address >= desc->buffer_bus + desc->used) 742 desc = list_entry(desc->list.next, 743 struct descriptor_buffer, list); 744 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); 745 last = find_branch_descriptor(d, z); 746 747 if (!ctx->callback(ctx, d, last)) 748 break; 749 750 if (old_desc != desc) { 751 /* If we've advanced to the next buffer, move the 752 * previous buffer to the free list. */ 753 unsigned long flags; 754 old_desc->used = 0; 755 spin_lock_irqsave(&ctx->ohci->lock, flags); 756 list_move_tail(&old_desc->list, &ctx->buffer_list); 757 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 758 } 759 ctx->last = last; 760 } 761 } 762 763 /* 764 * Allocate a new buffer and add it to the list of free buffers for this 765 * context. Must be called with ohci->lock held. 766 */ 767 static int context_add_buffer(struct context *ctx) 768 { 769 struct descriptor_buffer *desc; 770 dma_addr_t uninitialized_var(bus_addr); 771 int offset; 772 773 /* 774 * 16MB of descriptors should be far more than enough for any DMA 775 * program. This will catch run-away userspace or DoS attacks. 776 */ 777 if (ctx->total_allocation >= 16*1024*1024) 778 return -ENOMEM; 779 780 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, 781 &bus_addr, GFP_ATOMIC); 782 if (!desc) 783 return -ENOMEM; 784 785 offset = (void *)&desc->buffer - (void *)desc; 786 desc->buffer_size = PAGE_SIZE - offset; 787 desc->buffer_bus = bus_addr + offset; 788 desc->used = 0; 789 790 list_add_tail(&desc->list, &ctx->buffer_list); 791 ctx->total_allocation += PAGE_SIZE; 792 793 return 0; 794 } 795 796 static int context_init(struct context *ctx, struct fw_ohci *ohci, 797 u32 regs, descriptor_callback_t callback) 798 { 799 ctx->ohci = ohci; 800 ctx->regs = regs; 801 ctx->total_allocation = 0; 802 803 INIT_LIST_HEAD(&ctx->buffer_list); 804 if (context_add_buffer(ctx) < 0) 805 return -ENOMEM; 806 807 ctx->buffer_tail = list_entry(ctx->buffer_list.next, 808 struct descriptor_buffer, list); 809 810 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); 811 ctx->callback = callback; 812 813 /* 814 * We put a dummy descriptor in the buffer that has a NULL 815 * branch address and looks like it's been sent. That way we 816 * have a descriptor to append DMA programs to. 817 */ 818 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); 819 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); 820 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); 821 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); 822 ctx->last = ctx->buffer_tail->buffer; 823 ctx->prev = ctx->buffer_tail->buffer; 824 825 return 0; 826 } 827 828 static void context_release(struct context *ctx) 829 { 830 struct fw_card *card = &ctx->ohci->card; 831 struct descriptor_buffer *desc, *tmp; 832 833 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) 834 dma_free_coherent(card->device, PAGE_SIZE, desc, 835 desc->buffer_bus - 836 ((void *)&desc->buffer - (void *)desc)); 837 } 838 839 /* Must be called with ohci->lock held */ 840 static struct descriptor *context_get_descriptors(struct context *ctx, 841 int z, dma_addr_t *d_bus) 842 { 843 struct descriptor *d = NULL; 844 struct descriptor_buffer *desc = ctx->buffer_tail; 845 846 if (z * sizeof(*d) > desc->buffer_size) 847 return NULL; 848 849 if (z * sizeof(*d) > desc->buffer_size - desc->used) { 850 /* No room for the descriptor in this buffer, so advance to the 851 * next one. */ 852 853 if (desc->list.next == &ctx->buffer_list) { 854 /* If there is no free buffer next in the list, 855 * allocate one. */ 856 if (context_add_buffer(ctx) < 0) 857 return NULL; 858 } 859 desc = list_entry(desc->list.next, 860 struct descriptor_buffer, list); 861 ctx->buffer_tail = desc; 862 } 863 864 d = desc->buffer + desc->used / sizeof(*d); 865 memset(d, 0, z * sizeof(*d)); 866 *d_bus = desc->buffer_bus + desc->used; 867 868 return d; 869 } 870 871 static void context_run(struct context *ctx, u32 extra) 872 { 873 struct fw_ohci *ohci = ctx->ohci; 874 875 reg_write(ohci, COMMAND_PTR(ctx->regs), 876 le32_to_cpu(ctx->last->branch_address)); 877 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); 878 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); 879 flush_writes(ohci); 880 } 881 882 static void context_append(struct context *ctx, 883 struct descriptor *d, int z, int extra) 884 { 885 dma_addr_t d_bus; 886 struct descriptor_buffer *desc = ctx->buffer_tail; 887 888 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); 889 890 desc->used += (z + extra) * sizeof(*d); 891 ctx->prev->branch_address = cpu_to_le32(d_bus | z); 892 ctx->prev = find_branch_descriptor(d, z); 893 894 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 895 flush_writes(ctx->ohci); 896 } 897 898 static void context_stop(struct context *ctx) 899 { 900 u32 reg; 901 int i; 902 903 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); 904 flush_writes(ctx->ohci); 905 906 for (i = 0; i < 10; i++) { 907 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); 908 if ((reg & CONTEXT_ACTIVE) == 0) 909 return; 910 911 mdelay(1); 912 } 913 fw_error("Error: DMA context still active (0x%08x)\n", reg); 914 } 915 916 struct driver_data { 917 struct fw_packet *packet; 918 }; 919 920 /* 921 * This function apppends a packet to the DMA queue for transmission. 922 * Must always be called with the ochi->lock held to ensure proper 923 * generation handling and locking around packet queue manipulation. 924 */ 925 static int at_context_queue_packet(struct context *ctx, 926 struct fw_packet *packet) 927 { 928 struct fw_ohci *ohci = ctx->ohci; 929 dma_addr_t d_bus, uninitialized_var(payload_bus); 930 struct driver_data *driver_data; 931 struct descriptor *d, *last; 932 __le32 *header; 933 int z, tcode; 934 u32 reg; 935 936 d = context_get_descriptors(ctx, 4, &d_bus); 937 if (d == NULL) { 938 packet->ack = RCODE_SEND_ERROR; 939 return -1; 940 } 941 942 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 943 d[0].res_count = cpu_to_le16(packet->timestamp); 944 945 /* 946 * The DMA format for asyncronous link packets is different 947 * from the IEEE1394 layout, so shift the fields around 948 * accordingly. If header_length is 8, it's a PHY packet, to 949 * which we need to prepend an extra quadlet. 950 */ 951 952 header = (__le32 *) &d[1]; 953 switch (packet->header_length) { 954 case 16: 955 case 12: 956 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 957 (packet->speed << 16)); 958 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | 959 (packet->header[0] & 0xffff0000)); 960 header[2] = cpu_to_le32(packet->header[2]); 961 962 tcode = (packet->header[0] >> 4) & 0x0f; 963 if (TCODE_IS_BLOCK_PACKET(tcode)) 964 header[3] = cpu_to_le32(packet->header[3]); 965 else 966 header[3] = (__force __le32) packet->header[3]; 967 968 d[0].req_count = cpu_to_le16(packet->header_length); 969 break; 970 971 case 8: 972 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | 973 (packet->speed << 16)); 974 header[1] = cpu_to_le32(packet->header[0]); 975 header[2] = cpu_to_le32(packet->header[1]); 976 d[0].req_count = cpu_to_le16(12); 977 break; 978 979 case 4: 980 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 981 (packet->speed << 16)); 982 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); 983 d[0].req_count = cpu_to_le16(8); 984 break; 985 986 default: 987 /* BUG(); */ 988 packet->ack = RCODE_SEND_ERROR; 989 return -1; 990 } 991 992 driver_data = (struct driver_data *) &d[3]; 993 driver_data->packet = packet; 994 packet->driver_data = driver_data; 995 996 if (packet->payload_length > 0) { 997 payload_bus = 998 dma_map_single(ohci->card.device, packet->payload, 999 packet->payload_length, DMA_TO_DEVICE); 1000 if (dma_mapping_error(ohci->card.device, payload_bus)) { 1001 packet->ack = RCODE_SEND_ERROR; 1002 return -1; 1003 } 1004 packet->payload_bus = payload_bus; 1005 packet->payload_mapped = true; 1006 1007 d[2].req_count = cpu_to_le16(packet->payload_length); 1008 d[2].data_address = cpu_to_le32(payload_bus); 1009 last = &d[2]; 1010 z = 3; 1011 } else { 1012 last = &d[0]; 1013 z = 2; 1014 } 1015 1016 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 1017 DESCRIPTOR_IRQ_ALWAYS | 1018 DESCRIPTOR_BRANCH_ALWAYS); 1019 1020 /* 1021 * If the controller and packet generations don't match, we need to 1022 * bail out and try again. If IntEvent.busReset is set, the AT context 1023 * is halted, so appending to the context and trying to run it is 1024 * futile. Most controllers do the right thing and just flush the AT 1025 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but 1026 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind 1027 * up stalling out. So we just bail out in software and try again 1028 * later, and everyone is happy. 1029 * FIXME: Document how the locking works. 1030 */ 1031 if (ohci->generation != packet->generation || 1032 reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) { 1033 if (packet->payload_mapped) 1034 dma_unmap_single(ohci->card.device, payload_bus, 1035 packet->payload_length, DMA_TO_DEVICE); 1036 packet->ack = RCODE_GENERATION; 1037 return -1; 1038 } 1039 1040 context_append(ctx, d, z, 4 - z); 1041 1042 /* If the context isn't already running, start it up. */ 1043 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); 1044 if ((reg & CONTEXT_RUN) == 0) 1045 context_run(ctx, 0); 1046 1047 return 0; 1048 } 1049 1050 static int handle_at_packet(struct context *context, 1051 struct descriptor *d, 1052 struct descriptor *last) 1053 { 1054 struct driver_data *driver_data; 1055 struct fw_packet *packet; 1056 struct fw_ohci *ohci = context->ohci; 1057 int evt; 1058 1059 if (last->transfer_status == 0) 1060 /* This descriptor isn't done yet, stop iteration. */ 1061 return 0; 1062 1063 driver_data = (struct driver_data *) &d[3]; 1064 packet = driver_data->packet; 1065 if (packet == NULL) 1066 /* This packet was cancelled, just continue. */ 1067 return 1; 1068 1069 if (packet->payload_mapped) 1070 dma_unmap_single(ohci->card.device, packet->payload_bus, 1071 packet->payload_length, DMA_TO_DEVICE); 1072 1073 evt = le16_to_cpu(last->transfer_status) & 0x1f; 1074 packet->timestamp = le16_to_cpu(last->res_count); 1075 1076 log_ar_at_event('T', packet->speed, packet->header, evt); 1077 1078 switch (evt) { 1079 case OHCI1394_evt_timeout: 1080 /* Async response transmit timed out. */ 1081 packet->ack = RCODE_CANCELLED; 1082 break; 1083 1084 case OHCI1394_evt_flushed: 1085 /* 1086 * The packet was flushed should give same error as 1087 * when we try to use a stale generation count. 1088 */ 1089 packet->ack = RCODE_GENERATION; 1090 break; 1091 1092 case OHCI1394_evt_missing_ack: 1093 /* 1094 * Using a valid (current) generation count, but the 1095 * node is not on the bus or not sending acks. 1096 */ 1097 packet->ack = RCODE_NO_ACK; 1098 break; 1099 1100 case ACK_COMPLETE + 0x10: 1101 case ACK_PENDING + 0x10: 1102 case ACK_BUSY_X + 0x10: 1103 case ACK_BUSY_A + 0x10: 1104 case ACK_BUSY_B + 0x10: 1105 case ACK_DATA_ERROR + 0x10: 1106 case ACK_TYPE_ERROR + 0x10: 1107 packet->ack = evt - 0x10; 1108 break; 1109 1110 default: 1111 packet->ack = RCODE_SEND_ERROR; 1112 break; 1113 } 1114 1115 packet->callback(packet, &ohci->card, packet->ack); 1116 1117 return 1; 1118 } 1119 1120 #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) 1121 #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) 1122 #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) 1123 #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) 1124 #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) 1125 1126 static void handle_local_rom(struct fw_ohci *ohci, 1127 struct fw_packet *packet, u32 csr) 1128 { 1129 struct fw_packet response; 1130 int tcode, length, i; 1131 1132 tcode = HEADER_GET_TCODE(packet->header[0]); 1133 if (TCODE_IS_BLOCK_PACKET(tcode)) 1134 length = HEADER_GET_DATA_LENGTH(packet->header[3]); 1135 else 1136 length = 4; 1137 1138 i = csr - CSR_CONFIG_ROM; 1139 if (i + length > CONFIG_ROM_SIZE) { 1140 fw_fill_response(&response, packet->header, 1141 RCODE_ADDRESS_ERROR, NULL, 0); 1142 } else if (!TCODE_IS_READ_REQUEST(tcode)) { 1143 fw_fill_response(&response, packet->header, 1144 RCODE_TYPE_ERROR, NULL, 0); 1145 } else { 1146 fw_fill_response(&response, packet->header, RCODE_COMPLETE, 1147 (void *) ohci->config_rom + i, length); 1148 } 1149 1150 fw_core_handle_response(&ohci->card, &response); 1151 } 1152 1153 static void handle_local_lock(struct fw_ohci *ohci, 1154 struct fw_packet *packet, u32 csr) 1155 { 1156 struct fw_packet response; 1157 int tcode, length, ext_tcode, sel; 1158 __be32 *payload, lock_old; 1159 u32 lock_arg, lock_data; 1160 1161 tcode = HEADER_GET_TCODE(packet->header[0]); 1162 length = HEADER_GET_DATA_LENGTH(packet->header[3]); 1163 payload = packet->payload; 1164 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); 1165 1166 if (tcode == TCODE_LOCK_REQUEST && 1167 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { 1168 lock_arg = be32_to_cpu(payload[0]); 1169 lock_data = be32_to_cpu(payload[1]); 1170 } else if (tcode == TCODE_READ_QUADLET_REQUEST) { 1171 lock_arg = 0; 1172 lock_data = 0; 1173 } else { 1174 fw_fill_response(&response, packet->header, 1175 RCODE_TYPE_ERROR, NULL, 0); 1176 goto out; 1177 } 1178 1179 sel = (csr - CSR_BUS_MANAGER_ID) / 4; 1180 reg_write(ohci, OHCI1394_CSRData, lock_data); 1181 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); 1182 reg_write(ohci, OHCI1394_CSRControl, sel); 1183 1184 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) 1185 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData)); 1186 else 1187 fw_notify("swap not done yet\n"); 1188 1189 fw_fill_response(&response, packet->header, 1190 RCODE_COMPLETE, &lock_old, sizeof(lock_old)); 1191 out: 1192 fw_core_handle_response(&ohci->card, &response); 1193 } 1194 1195 static void handle_local_request(struct context *ctx, struct fw_packet *packet) 1196 { 1197 u64 offset; 1198 u32 csr; 1199 1200 if (ctx == &ctx->ohci->at_request_ctx) { 1201 packet->ack = ACK_PENDING; 1202 packet->callback(packet, &ctx->ohci->card, packet->ack); 1203 } 1204 1205 offset = 1206 ((unsigned long long) 1207 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | 1208 packet->header[2]; 1209 csr = offset - CSR_REGISTER_BASE; 1210 1211 /* Handle config rom reads. */ 1212 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END) 1213 handle_local_rom(ctx->ohci, packet, csr); 1214 else switch (csr) { 1215 case CSR_BUS_MANAGER_ID: 1216 case CSR_BANDWIDTH_AVAILABLE: 1217 case CSR_CHANNELS_AVAILABLE_HI: 1218 case CSR_CHANNELS_AVAILABLE_LO: 1219 handle_local_lock(ctx->ohci, packet, csr); 1220 break; 1221 default: 1222 if (ctx == &ctx->ohci->at_request_ctx) 1223 fw_core_handle_request(&ctx->ohci->card, packet); 1224 else 1225 fw_core_handle_response(&ctx->ohci->card, packet); 1226 break; 1227 } 1228 1229 if (ctx == &ctx->ohci->at_response_ctx) { 1230 packet->ack = ACK_COMPLETE; 1231 packet->callback(packet, &ctx->ohci->card, packet->ack); 1232 } 1233 } 1234 1235 static void at_context_transmit(struct context *ctx, struct fw_packet *packet) 1236 { 1237 unsigned long flags; 1238 int ret; 1239 1240 spin_lock_irqsave(&ctx->ohci->lock, flags); 1241 1242 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && 1243 ctx->ohci->generation == packet->generation) { 1244 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1245 handle_local_request(ctx, packet); 1246 return; 1247 } 1248 1249 ret = at_context_queue_packet(ctx, packet); 1250 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1251 1252 if (ret < 0) 1253 packet->callback(packet, &ctx->ohci->card, packet->ack); 1254 1255 } 1256 1257 static void bus_reset_tasklet(unsigned long data) 1258 { 1259 struct fw_ohci *ohci = (struct fw_ohci *)data; 1260 int self_id_count, i, j, reg; 1261 int generation, new_generation; 1262 unsigned long flags; 1263 void *free_rom = NULL; 1264 dma_addr_t free_rom_bus = 0; 1265 1266 reg = reg_read(ohci, OHCI1394_NodeID); 1267 if (!(reg & OHCI1394_NodeID_idValid)) { 1268 fw_notify("node ID not valid, new bus reset in progress\n"); 1269 return; 1270 } 1271 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { 1272 fw_notify("malconfigured bus\n"); 1273 return; 1274 } 1275 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | 1276 OHCI1394_NodeID_nodeNumber); 1277 1278 reg = reg_read(ohci, OHCI1394_SelfIDCount); 1279 if (reg & OHCI1394_SelfIDCount_selfIDError) { 1280 fw_notify("inconsistent self IDs\n"); 1281 return; 1282 } 1283 /* 1284 * The count in the SelfIDCount register is the number of 1285 * bytes in the self ID receive buffer. Since we also receive 1286 * the inverted quadlets and a header quadlet, we shift one 1287 * bit extra to get the actual number of self IDs. 1288 */ 1289 self_id_count = (reg >> 3) & 0xff; 1290 if (self_id_count == 0 || self_id_count > 252) { 1291 fw_notify("inconsistent self IDs\n"); 1292 return; 1293 } 1294 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; 1295 rmb(); 1296 1297 for (i = 1, j = 0; j < self_id_count; i += 2, j++) { 1298 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) { 1299 fw_notify("inconsistent self IDs\n"); 1300 return; 1301 } 1302 ohci->self_id_buffer[j] = 1303 cond_le32_to_cpu(ohci->self_id_cpu[i]); 1304 } 1305 rmb(); 1306 1307 /* 1308 * Check the consistency of the self IDs we just read. The 1309 * problem we face is that a new bus reset can start while we 1310 * read out the self IDs from the DMA buffer. If this happens, 1311 * the DMA buffer will be overwritten with new self IDs and we 1312 * will read out inconsistent data. The OHCI specification 1313 * (section 11.2) recommends a technique similar to 1314 * linux/seqlock.h, where we remember the generation of the 1315 * self IDs in the buffer before reading them out and compare 1316 * it to the current generation after reading them out. If 1317 * the two generations match we know we have a consistent set 1318 * of self IDs. 1319 */ 1320 1321 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; 1322 if (new_generation != generation) { 1323 fw_notify("recursive bus reset detected, " 1324 "discarding self ids\n"); 1325 return; 1326 } 1327 1328 /* FIXME: Document how the locking works. */ 1329 spin_lock_irqsave(&ohci->lock, flags); 1330 1331 ohci->generation = generation; 1332 context_stop(&ohci->at_request_ctx); 1333 context_stop(&ohci->at_response_ctx); 1334 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 1335 1336 if (ohci->quirks & QUIRK_RESET_PACKET) 1337 ohci->request_generation = generation; 1338 1339 /* 1340 * This next bit is unrelated to the AT context stuff but we 1341 * have to do it under the spinlock also. If a new config rom 1342 * was set up before this reset, the old one is now no longer 1343 * in use and we can free it. Update the config rom pointers 1344 * to point to the current config rom and clear the 1345 * next_config_rom pointer so a new udpate can take place. 1346 */ 1347 1348 if (ohci->next_config_rom != NULL) { 1349 if (ohci->next_config_rom != ohci->config_rom) { 1350 free_rom = ohci->config_rom; 1351 free_rom_bus = ohci->config_rom_bus; 1352 } 1353 ohci->config_rom = ohci->next_config_rom; 1354 ohci->config_rom_bus = ohci->next_config_rom_bus; 1355 ohci->next_config_rom = NULL; 1356 1357 /* 1358 * Restore config_rom image and manually update 1359 * config_rom registers. Writing the header quadlet 1360 * will indicate that the config rom is ready, so we 1361 * do that last. 1362 */ 1363 reg_write(ohci, OHCI1394_BusOptions, 1364 be32_to_cpu(ohci->config_rom[2])); 1365 ohci->config_rom[0] = ohci->next_header; 1366 reg_write(ohci, OHCI1394_ConfigROMhdr, 1367 be32_to_cpu(ohci->next_header)); 1368 } 1369 1370 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA 1371 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); 1372 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); 1373 #endif 1374 1375 spin_unlock_irqrestore(&ohci->lock, flags); 1376 1377 if (free_rom) 1378 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1379 free_rom, free_rom_bus); 1380 1381 log_selfids(ohci->node_id, generation, 1382 self_id_count, ohci->self_id_buffer); 1383 1384 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, 1385 self_id_count, ohci->self_id_buffer); 1386 } 1387 1388 static irqreturn_t irq_handler(int irq, void *data) 1389 { 1390 struct fw_ohci *ohci = data; 1391 u32 event, iso_event; 1392 int i; 1393 1394 event = reg_read(ohci, OHCI1394_IntEventClear); 1395 1396 if (!event || !~event) 1397 return IRQ_NONE; 1398 1399 /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */ 1400 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset); 1401 log_irqs(event); 1402 1403 if (event & OHCI1394_selfIDComplete) 1404 tasklet_schedule(&ohci->bus_reset_tasklet); 1405 1406 if (event & OHCI1394_RQPkt) 1407 tasklet_schedule(&ohci->ar_request_ctx.tasklet); 1408 1409 if (event & OHCI1394_RSPkt) 1410 tasklet_schedule(&ohci->ar_response_ctx.tasklet); 1411 1412 if (event & OHCI1394_reqTxComplete) 1413 tasklet_schedule(&ohci->at_request_ctx.tasklet); 1414 1415 if (event & OHCI1394_respTxComplete) 1416 tasklet_schedule(&ohci->at_response_ctx.tasklet); 1417 1418 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); 1419 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); 1420 1421 while (iso_event) { 1422 i = ffs(iso_event) - 1; 1423 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet); 1424 iso_event &= ~(1 << i); 1425 } 1426 1427 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); 1428 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); 1429 1430 while (iso_event) { 1431 i = ffs(iso_event) - 1; 1432 tasklet_schedule(&ohci->it_context_list[i].context.tasklet); 1433 iso_event &= ~(1 << i); 1434 } 1435 1436 if (unlikely(event & OHCI1394_regAccessFail)) 1437 fw_error("Register access failure - " 1438 "please notify linux1394-devel@lists.sf.net\n"); 1439 1440 if (unlikely(event & OHCI1394_postedWriteErr)) 1441 fw_error("PCI posted write error\n"); 1442 1443 if (unlikely(event & OHCI1394_cycleTooLong)) { 1444 if (printk_ratelimit()) 1445 fw_notify("isochronous cycle too long\n"); 1446 reg_write(ohci, OHCI1394_LinkControlSet, 1447 OHCI1394_LinkControl_cycleMaster); 1448 } 1449 1450 if (unlikely(event & OHCI1394_cycleInconsistent)) { 1451 /* 1452 * We need to clear this event bit in order to make 1453 * cycleMatch isochronous I/O work. In theory we should 1454 * stop active cycleMatch iso contexts now and restart 1455 * them at least two cycles later. (FIXME?) 1456 */ 1457 if (printk_ratelimit()) 1458 fw_notify("isochronous cycle inconsistent\n"); 1459 } 1460 1461 return IRQ_HANDLED; 1462 } 1463 1464 static int software_reset(struct fw_ohci *ohci) 1465 { 1466 int i; 1467 1468 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); 1469 1470 for (i = 0; i < OHCI_LOOP_COUNT; i++) { 1471 if ((reg_read(ohci, OHCI1394_HCControlSet) & 1472 OHCI1394_HCControl_softReset) == 0) 1473 return 0; 1474 msleep(1); 1475 } 1476 1477 return -EBUSY; 1478 } 1479 1480 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) 1481 { 1482 size_t size = length * 4; 1483 1484 memcpy(dest, src, size); 1485 if (size < CONFIG_ROM_SIZE) 1486 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); 1487 } 1488 1489 static int ohci_enable(struct fw_card *card, 1490 const __be32 *config_rom, size_t length) 1491 { 1492 struct fw_ohci *ohci = fw_ohci(card); 1493 struct pci_dev *dev = to_pci_dev(card->device); 1494 u32 lps; 1495 int i; 1496 1497 if (software_reset(ohci)) { 1498 fw_error("Failed to reset ohci card.\n"); 1499 return -EBUSY; 1500 } 1501 1502 /* 1503 * Now enable LPS, which we need in order to start accessing 1504 * most of the registers. In fact, on some cards (ALI M5251), 1505 * accessing registers in the SClk domain without LPS enabled 1506 * will lock up the machine. Wait 50msec to make sure we have 1507 * full link enabled. However, with some cards (well, at least 1508 * a JMicron PCIe card), we have to try again sometimes. 1509 */ 1510 reg_write(ohci, OHCI1394_HCControlSet, 1511 OHCI1394_HCControl_LPS | 1512 OHCI1394_HCControl_postedWriteEnable); 1513 flush_writes(ohci); 1514 1515 for (lps = 0, i = 0; !lps && i < 3; i++) { 1516 msleep(50); 1517 lps = reg_read(ohci, OHCI1394_HCControlSet) & 1518 OHCI1394_HCControl_LPS; 1519 } 1520 1521 if (!lps) { 1522 fw_error("Failed to set Link Power Status\n"); 1523 return -EIO; 1524 } 1525 1526 reg_write(ohci, OHCI1394_HCControlClear, 1527 OHCI1394_HCControl_noByteSwapData); 1528 1529 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); 1530 reg_write(ohci, OHCI1394_LinkControlClear, 1531 OHCI1394_LinkControl_rcvPhyPkt); 1532 reg_write(ohci, OHCI1394_LinkControlSet, 1533 OHCI1394_LinkControl_rcvSelfID | 1534 OHCI1394_LinkControl_cycleTimerEnable | 1535 OHCI1394_LinkControl_cycleMaster); 1536 1537 reg_write(ohci, OHCI1394_ATRetries, 1538 OHCI1394_MAX_AT_REQ_RETRIES | 1539 (OHCI1394_MAX_AT_RESP_RETRIES << 4) | 1540 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8)); 1541 1542 ar_context_run(&ohci->ar_request_ctx); 1543 ar_context_run(&ohci->ar_response_ctx); 1544 1545 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); 1546 reg_write(ohci, OHCI1394_IntEventClear, ~0); 1547 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 1548 reg_write(ohci, OHCI1394_IntMaskSet, 1549 OHCI1394_selfIDComplete | 1550 OHCI1394_RQPkt | OHCI1394_RSPkt | 1551 OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1552 OHCI1394_isochRx | OHCI1394_isochTx | 1553 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | 1554 OHCI1394_cycleInconsistent | OHCI1394_regAccessFail | 1555 OHCI1394_masterIntEnable); 1556 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1557 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); 1558 1559 /* Activate link_on bit and contender bit in our self ID packets.*/ 1560 if (ohci_update_phy_reg(card, 4, 0, 1561 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0) 1562 return -EIO; 1563 1564 /* 1565 * When the link is not yet enabled, the atomic config rom 1566 * update mechanism described below in ohci_set_config_rom() 1567 * is not active. We have to update ConfigRomHeader and 1568 * BusOptions manually, and the write to ConfigROMmap takes 1569 * effect immediately. We tie this to the enabling of the 1570 * link, so we have a valid config rom before enabling - the 1571 * OHCI requires that ConfigROMhdr and BusOptions have valid 1572 * values before enabling. 1573 * 1574 * However, when the ConfigROMmap is written, some controllers 1575 * always read back quadlets 0 and 2 from the config rom to 1576 * the ConfigRomHeader and BusOptions registers on bus reset. 1577 * They shouldn't do that in this initial case where the link 1578 * isn't enabled. This means we have to use the same 1579 * workaround here, setting the bus header to 0 and then write 1580 * the right values in the bus reset tasklet. 1581 */ 1582 1583 if (config_rom) { 1584 ohci->next_config_rom = 1585 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1586 &ohci->next_config_rom_bus, 1587 GFP_KERNEL); 1588 if (ohci->next_config_rom == NULL) 1589 return -ENOMEM; 1590 1591 copy_config_rom(ohci->next_config_rom, config_rom, length); 1592 } else { 1593 /* 1594 * In the suspend case, config_rom is NULL, which 1595 * means that we just reuse the old config rom. 1596 */ 1597 ohci->next_config_rom = ohci->config_rom; 1598 ohci->next_config_rom_bus = ohci->config_rom_bus; 1599 } 1600 1601 ohci->next_header = ohci->next_config_rom[0]; 1602 ohci->next_config_rom[0] = 0; 1603 reg_write(ohci, OHCI1394_ConfigROMhdr, 0); 1604 reg_write(ohci, OHCI1394_BusOptions, 1605 be32_to_cpu(ohci->next_config_rom[2])); 1606 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); 1607 1608 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); 1609 1610 if (request_irq(dev->irq, irq_handler, 1611 IRQF_SHARED, ohci_driver_name, ohci)) { 1612 fw_error("Failed to allocate shared interrupt %d.\n", 1613 dev->irq); 1614 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1615 ohci->config_rom, ohci->config_rom_bus); 1616 return -EIO; 1617 } 1618 1619 reg_write(ohci, OHCI1394_HCControlSet, 1620 OHCI1394_HCControl_linkEnable | 1621 OHCI1394_HCControl_BIBimageValid); 1622 flush_writes(ohci); 1623 1624 /* 1625 * We are ready to go, initiate bus reset to finish the 1626 * initialization. 1627 */ 1628 1629 fw_core_initiate_bus_reset(&ohci->card, 1); 1630 1631 return 0; 1632 } 1633 1634 static int ohci_set_config_rom(struct fw_card *card, 1635 const __be32 *config_rom, size_t length) 1636 { 1637 struct fw_ohci *ohci; 1638 unsigned long flags; 1639 int ret = -EBUSY; 1640 __be32 *next_config_rom; 1641 dma_addr_t uninitialized_var(next_config_rom_bus); 1642 1643 ohci = fw_ohci(card); 1644 1645 /* 1646 * When the OHCI controller is enabled, the config rom update 1647 * mechanism is a bit tricky, but easy enough to use. See 1648 * section 5.5.6 in the OHCI specification. 1649 * 1650 * The OHCI controller caches the new config rom address in a 1651 * shadow register (ConfigROMmapNext) and needs a bus reset 1652 * for the changes to take place. When the bus reset is 1653 * detected, the controller loads the new values for the 1654 * ConfigRomHeader and BusOptions registers from the specified 1655 * config rom and loads ConfigROMmap from the ConfigROMmapNext 1656 * shadow register. All automatically and atomically. 1657 * 1658 * Now, there's a twist to this story. The automatic load of 1659 * ConfigRomHeader and BusOptions doesn't honor the 1660 * noByteSwapData bit, so with a be32 config rom, the 1661 * controller will load be32 values in to these registers 1662 * during the atomic update, even on litte endian 1663 * architectures. The workaround we use is to put a 0 in the 1664 * header quadlet; 0 is endian agnostic and means that the 1665 * config rom isn't ready yet. In the bus reset tasklet we 1666 * then set up the real values for the two registers. 1667 * 1668 * We use ohci->lock to avoid racing with the code that sets 1669 * ohci->next_config_rom to NULL (see bus_reset_tasklet). 1670 */ 1671 1672 next_config_rom = 1673 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1674 &next_config_rom_bus, GFP_KERNEL); 1675 if (next_config_rom == NULL) 1676 return -ENOMEM; 1677 1678 spin_lock_irqsave(&ohci->lock, flags); 1679 1680 if (ohci->next_config_rom == NULL) { 1681 ohci->next_config_rom = next_config_rom; 1682 ohci->next_config_rom_bus = next_config_rom_bus; 1683 1684 copy_config_rom(ohci->next_config_rom, config_rom, length); 1685 1686 ohci->next_header = config_rom[0]; 1687 ohci->next_config_rom[0] = 0; 1688 1689 reg_write(ohci, OHCI1394_ConfigROMmap, 1690 ohci->next_config_rom_bus); 1691 ret = 0; 1692 } 1693 1694 spin_unlock_irqrestore(&ohci->lock, flags); 1695 1696 /* 1697 * Now initiate a bus reset to have the changes take 1698 * effect. We clean up the old config rom memory and DMA 1699 * mappings in the bus reset tasklet, since the OHCI 1700 * controller could need to access it before the bus reset 1701 * takes effect. 1702 */ 1703 if (ret == 0) 1704 fw_core_initiate_bus_reset(&ohci->card, 1); 1705 else 1706 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1707 next_config_rom, next_config_rom_bus); 1708 1709 return ret; 1710 } 1711 1712 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) 1713 { 1714 struct fw_ohci *ohci = fw_ohci(card); 1715 1716 at_context_transmit(&ohci->at_request_ctx, packet); 1717 } 1718 1719 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) 1720 { 1721 struct fw_ohci *ohci = fw_ohci(card); 1722 1723 at_context_transmit(&ohci->at_response_ctx, packet); 1724 } 1725 1726 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) 1727 { 1728 struct fw_ohci *ohci = fw_ohci(card); 1729 struct context *ctx = &ohci->at_request_ctx; 1730 struct driver_data *driver_data = packet->driver_data; 1731 int ret = -ENOENT; 1732 1733 tasklet_disable(&ctx->tasklet); 1734 1735 if (packet->ack != 0) 1736 goto out; 1737 1738 if (packet->payload_mapped) 1739 dma_unmap_single(ohci->card.device, packet->payload_bus, 1740 packet->payload_length, DMA_TO_DEVICE); 1741 1742 log_ar_at_event('T', packet->speed, packet->header, 0x20); 1743 driver_data->packet = NULL; 1744 packet->ack = RCODE_CANCELLED; 1745 packet->callback(packet, &ohci->card, packet->ack); 1746 ret = 0; 1747 out: 1748 tasklet_enable(&ctx->tasklet); 1749 1750 return ret; 1751 } 1752 1753 static int ohci_enable_phys_dma(struct fw_card *card, 1754 int node_id, int generation) 1755 { 1756 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA 1757 return 0; 1758 #else 1759 struct fw_ohci *ohci = fw_ohci(card); 1760 unsigned long flags; 1761 int n, ret = 0; 1762 1763 /* 1764 * FIXME: Make sure this bitmask is cleared when we clear the busReset 1765 * interrupt bit. Clear physReqResourceAllBuses on bus reset. 1766 */ 1767 1768 spin_lock_irqsave(&ohci->lock, flags); 1769 1770 if (ohci->generation != generation) { 1771 ret = -ESTALE; 1772 goto out; 1773 } 1774 1775 /* 1776 * Note, if the node ID contains a non-local bus ID, physical DMA is 1777 * enabled for _all_ nodes on remote buses. 1778 */ 1779 1780 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63; 1781 if (n < 32) 1782 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); 1783 else 1784 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); 1785 1786 flush_writes(ohci); 1787 out: 1788 spin_unlock_irqrestore(&ohci->lock, flags); 1789 1790 return ret; 1791 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 1792 } 1793 1794 static u32 cycle_timer_ticks(u32 cycle_timer) 1795 { 1796 u32 ticks; 1797 1798 ticks = cycle_timer & 0xfff; 1799 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); 1800 ticks += (3072 * 8000) * (cycle_timer >> 25); 1801 1802 return ticks; 1803 } 1804 1805 /* 1806 * Some controllers exhibit one or more of the following bugs when updating the 1807 * iso cycle timer register: 1808 * - When the lowest six bits are wrapping around to zero, a read that happens 1809 * at the same time will return garbage in the lowest ten bits. 1810 * - When the cycleOffset field wraps around to zero, the cycleCount field is 1811 * not incremented for about 60 ns. 1812 * - Occasionally, the entire register reads zero. 1813 * 1814 * To catch these, we read the register three times and ensure that the 1815 * difference between each two consecutive reads is approximately the same, i.e. 1816 * less than twice the other. Furthermore, any negative difference indicates an 1817 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to 1818 * execute, so we have enough precision to compute the ratio of the differences.) 1819 */ 1820 static u32 ohci_get_cycle_time(struct fw_card *card) 1821 { 1822 struct fw_ohci *ohci = fw_ohci(card); 1823 u32 c0, c1, c2; 1824 u32 t0, t1, t2; 1825 s32 diff01, diff12; 1826 int i; 1827 1828 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1829 1830 if (ohci->quirks & QUIRK_CYCLE_TIMER) { 1831 i = 0; 1832 c1 = c2; 1833 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1834 do { 1835 c0 = c1; 1836 c1 = c2; 1837 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1838 t0 = cycle_timer_ticks(c0); 1839 t1 = cycle_timer_ticks(c1); 1840 t2 = cycle_timer_ticks(c2); 1841 diff01 = t1 - t0; 1842 diff12 = t2 - t1; 1843 } while ((diff01 <= 0 || diff12 <= 0 || 1844 diff01 / diff12 >= 2 || diff12 / diff01 >= 2) 1845 && i++ < 20); 1846 } 1847 1848 return c2; 1849 } 1850 1851 static void copy_iso_headers(struct iso_context *ctx, void *p) 1852 { 1853 int i = ctx->header_length; 1854 1855 if (i + ctx->base.header_size > PAGE_SIZE) 1856 return; 1857 1858 /* 1859 * The iso header is byteswapped to little endian by 1860 * the controller, but the remaining header quadlets 1861 * are big endian. We want to present all the headers 1862 * as big endian, so we have to swap the first quadlet. 1863 */ 1864 if (ctx->base.header_size > 0) 1865 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); 1866 if (ctx->base.header_size > 4) 1867 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); 1868 if (ctx->base.header_size > 8) 1869 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); 1870 ctx->header_length += ctx->base.header_size; 1871 } 1872 1873 static int handle_ir_packet_per_buffer(struct context *context, 1874 struct descriptor *d, 1875 struct descriptor *last) 1876 { 1877 struct iso_context *ctx = 1878 container_of(context, struct iso_context, context); 1879 struct descriptor *pd; 1880 __le32 *ir_header; 1881 void *p; 1882 1883 for (pd = d; pd <= last; pd++) { 1884 if (pd->transfer_status) 1885 break; 1886 } 1887 if (pd > last) 1888 /* Descriptor(s) not done yet, stop iteration */ 1889 return 0; 1890 1891 p = last + 1; 1892 copy_iso_headers(ctx, p); 1893 1894 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 1895 ir_header = (__le32 *) p; 1896 ctx->base.callback(&ctx->base, 1897 le32_to_cpu(ir_header[0]) & 0xffff, 1898 ctx->header_length, ctx->header, 1899 ctx->base.callback_data); 1900 ctx->header_length = 0; 1901 } 1902 1903 return 1; 1904 } 1905 1906 static int handle_it_packet(struct context *context, 1907 struct descriptor *d, 1908 struct descriptor *last) 1909 { 1910 struct iso_context *ctx = 1911 container_of(context, struct iso_context, context); 1912 int i; 1913 struct descriptor *pd; 1914 1915 for (pd = d; pd <= last; pd++) 1916 if (pd->transfer_status) 1917 break; 1918 if (pd > last) 1919 /* Descriptor(s) not done yet, stop iteration */ 1920 return 0; 1921 1922 i = ctx->header_length; 1923 if (i + 4 < PAGE_SIZE) { 1924 /* Present this value as big-endian to match the receive code */ 1925 *(__be32 *)(ctx->header + i) = cpu_to_be32( 1926 ((u32)le16_to_cpu(pd->transfer_status) << 16) | 1927 le16_to_cpu(pd->res_count)); 1928 ctx->header_length += 4; 1929 } 1930 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 1931 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count), 1932 ctx->header_length, ctx->header, 1933 ctx->base.callback_data); 1934 ctx->header_length = 0; 1935 } 1936 return 1; 1937 } 1938 1939 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, 1940 int type, int channel, size_t header_size) 1941 { 1942 struct fw_ohci *ohci = fw_ohci(card); 1943 struct iso_context *ctx, *list; 1944 descriptor_callback_t callback; 1945 u64 *channels, dont_care = ~0ULL; 1946 u32 *mask, regs; 1947 unsigned long flags; 1948 int index, ret = -ENOMEM; 1949 1950 if (type == FW_ISO_CONTEXT_TRANSMIT) { 1951 channels = &dont_care; 1952 mask = &ohci->it_context_mask; 1953 list = ohci->it_context_list; 1954 callback = handle_it_packet; 1955 } else { 1956 channels = &ohci->ir_context_channels; 1957 mask = &ohci->ir_context_mask; 1958 list = ohci->ir_context_list; 1959 callback = handle_ir_packet_per_buffer; 1960 } 1961 1962 spin_lock_irqsave(&ohci->lock, flags); 1963 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; 1964 if (index >= 0) { 1965 *channels &= ~(1ULL << channel); 1966 *mask &= ~(1 << index); 1967 } 1968 spin_unlock_irqrestore(&ohci->lock, flags); 1969 1970 if (index < 0) 1971 return ERR_PTR(-EBUSY); 1972 1973 if (type == FW_ISO_CONTEXT_TRANSMIT) 1974 regs = OHCI1394_IsoXmitContextBase(index); 1975 else 1976 regs = OHCI1394_IsoRcvContextBase(index); 1977 1978 ctx = &list[index]; 1979 memset(ctx, 0, sizeof(*ctx)); 1980 ctx->header_length = 0; 1981 ctx->header = (void *) __get_free_page(GFP_KERNEL); 1982 if (ctx->header == NULL) 1983 goto out; 1984 1985 ret = context_init(&ctx->context, ohci, regs, callback); 1986 if (ret < 0) 1987 goto out_with_header; 1988 1989 return &ctx->base; 1990 1991 out_with_header: 1992 free_page((unsigned long)ctx->header); 1993 out: 1994 spin_lock_irqsave(&ohci->lock, flags); 1995 *mask |= 1 << index; 1996 spin_unlock_irqrestore(&ohci->lock, flags); 1997 1998 return ERR_PTR(ret); 1999 } 2000 2001 static int ohci_start_iso(struct fw_iso_context *base, 2002 s32 cycle, u32 sync, u32 tags) 2003 { 2004 struct iso_context *ctx = container_of(base, struct iso_context, base); 2005 struct fw_ohci *ohci = ctx->context.ohci; 2006 u32 control, match; 2007 int index; 2008 2009 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { 2010 index = ctx - ohci->it_context_list; 2011 match = 0; 2012 if (cycle >= 0) 2013 match = IT_CONTEXT_CYCLE_MATCH_ENABLE | 2014 (cycle & 0x7fff) << 16; 2015 2016 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); 2017 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); 2018 context_run(&ctx->context, match); 2019 } else { 2020 index = ctx - ohci->ir_context_list; 2021 control = IR_CONTEXT_ISOCH_HEADER; 2022 match = (tags << 28) | (sync << 8) | ctx->base.channel; 2023 if (cycle >= 0) { 2024 match |= (cycle & 0x07fff) << 12; 2025 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE; 2026 } 2027 2028 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index); 2029 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); 2030 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); 2031 context_run(&ctx->context, control); 2032 } 2033 2034 return 0; 2035 } 2036 2037 static int ohci_stop_iso(struct fw_iso_context *base) 2038 { 2039 struct fw_ohci *ohci = fw_ohci(base->card); 2040 struct iso_context *ctx = container_of(base, struct iso_context, base); 2041 int index; 2042 2043 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { 2044 index = ctx - ohci->it_context_list; 2045 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); 2046 } else { 2047 index = ctx - ohci->ir_context_list; 2048 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); 2049 } 2050 flush_writes(ohci); 2051 context_stop(&ctx->context); 2052 2053 return 0; 2054 } 2055 2056 static void ohci_free_iso_context(struct fw_iso_context *base) 2057 { 2058 struct fw_ohci *ohci = fw_ohci(base->card); 2059 struct iso_context *ctx = container_of(base, struct iso_context, base); 2060 unsigned long flags; 2061 int index; 2062 2063 ohci_stop_iso(base); 2064 context_release(&ctx->context); 2065 free_page((unsigned long)ctx->header); 2066 2067 spin_lock_irqsave(&ohci->lock, flags); 2068 2069 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { 2070 index = ctx - ohci->it_context_list; 2071 ohci->it_context_mask |= 1 << index; 2072 } else { 2073 index = ctx - ohci->ir_context_list; 2074 ohci->ir_context_mask |= 1 << index; 2075 ohci->ir_context_channels |= 1ULL << base->channel; 2076 } 2077 2078 spin_unlock_irqrestore(&ohci->lock, flags); 2079 } 2080 2081 static int ohci_queue_iso_transmit(struct fw_iso_context *base, 2082 struct fw_iso_packet *packet, 2083 struct fw_iso_buffer *buffer, 2084 unsigned long payload) 2085 { 2086 struct iso_context *ctx = container_of(base, struct iso_context, base); 2087 struct descriptor *d, *last, *pd; 2088 struct fw_iso_packet *p; 2089 __le32 *header; 2090 dma_addr_t d_bus, page_bus; 2091 u32 z, header_z, payload_z, irq; 2092 u32 payload_index, payload_end_index, next_page_index; 2093 int page, end_page, i, length, offset; 2094 2095 p = packet; 2096 payload_index = payload; 2097 2098 if (p->skip) 2099 z = 1; 2100 else 2101 z = 2; 2102 if (p->header_length > 0) 2103 z++; 2104 2105 /* Determine the first page the payload isn't contained in. */ 2106 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; 2107 if (p->payload_length > 0) 2108 payload_z = end_page - (payload_index >> PAGE_SHIFT); 2109 else 2110 payload_z = 0; 2111 2112 z += payload_z; 2113 2114 /* Get header size in number of descriptors. */ 2115 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); 2116 2117 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); 2118 if (d == NULL) 2119 return -ENOMEM; 2120 2121 if (!p->skip) { 2122 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 2123 d[0].req_count = cpu_to_le16(8); 2124 /* 2125 * Link the skip address to this descriptor itself. This causes 2126 * a context to skip a cycle whenever lost cycles or FIFO 2127 * overruns occur, without dropping the data. The application 2128 * should then decide whether this is an error condition or not. 2129 * FIXME: Make the context's cycle-lost behaviour configurable? 2130 */ 2131 d[0].branch_address = cpu_to_le32(d_bus | z); 2132 2133 header = (__le32 *) &d[1]; 2134 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | 2135 IT_HEADER_TAG(p->tag) | 2136 IT_HEADER_TCODE(TCODE_STREAM_DATA) | 2137 IT_HEADER_CHANNEL(ctx->base.channel) | 2138 IT_HEADER_SPEED(ctx->base.speed)); 2139 header[1] = 2140 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + 2141 p->payload_length)); 2142 } 2143 2144 if (p->header_length > 0) { 2145 d[2].req_count = cpu_to_le16(p->header_length); 2146 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d)); 2147 memcpy(&d[z], p->header, p->header_length); 2148 } 2149 2150 pd = d + z - payload_z; 2151 payload_end_index = payload_index + p->payload_length; 2152 for (i = 0; i < payload_z; i++) { 2153 page = payload_index >> PAGE_SHIFT; 2154 offset = payload_index & ~PAGE_MASK; 2155 next_page_index = (page + 1) << PAGE_SHIFT; 2156 length = 2157 min(next_page_index, payload_end_index) - payload_index; 2158 pd[i].req_count = cpu_to_le16(length); 2159 2160 page_bus = page_private(buffer->pages[page]); 2161 pd[i].data_address = cpu_to_le32(page_bus + offset); 2162 2163 payload_index += length; 2164 } 2165 2166 if (p->interrupt) 2167 irq = DESCRIPTOR_IRQ_ALWAYS; 2168 else 2169 irq = DESCRIPTOR_NO_IRQ; 2170 2171 last = z == 2 ? d : d + z - 1; 2172 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 2173 DESCRIPTOR_STATUS | 2174 DESCRIPTOR_BRANCH_ALWAYS | 2175 irq); 2176 2177 context_append(&ctx->context, d, z, header_z); 2178 2179 return 0; 2180 } 2181 2182 static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, 2183 struct fw_iso_packet *packet, 2184 struct fw_iso_buffer *buffer, 2185 unsigned long payload) 2186 { 2187 struct iso_context *ctx = container_of(base, struct iso_context, base); 2188 struct descriptor *d, *pd; 2189 struct fw_iso_packet *p = packet; 2190 dma_addr_t d_bus, page_bus; 2191 u32 z, header_z, rest; 2192 int i, j, length; 2193 int page, offset, packet_count, header_size, payload_per_buffer; 2194 2195 /* 2196 * The OHCI controller puts the isochronous header and trailer in the 2197 * buffer, so we need at least 8 bytes. 2198 */ 2199 packet_count = p->header_length / ctx->base.header_size; 2200 header_size = max(ctx->base.header_size, (size_t)8); 2201 2202 /* Get header size in number of descriptors. */ 2203 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2204 page = payload >> PAGE_SHIFT; 2205 offset = payload & ~PAGE_MASK; 2206 payload_per_buffer = p->payload_length / packet_count; 2207 2208 for (i = 0; i < packet_count; i++) { 2209 /* d points to the header descriptor */ 2210 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1; 2211 d = context_get_descriptors(&ctx->context, 2212 z + header_z, &d_bus); 2213 if (d == NULL) 2214 return -ENOMEM; 2215 2216 d->control = cpu_to_le16(DESCRIPTOR_STATUS | 2217 DESCRIPTOR_INPUT_MORE); 2218 if (p->skip && i == 0) 2219 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 2220 d->req_count = cpu_to_le16(header_size); 2221 d->res_count = d->req_count; 2222 d->transfer_status = 0; 2223 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); 2224 2225 rest = payload_per_buffer; 2226 pd = d; 2227 for (j = 1; j < z; j++) { 2228 pd++; 2229 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 2230 DESCRIPTOR_INPUT_MORE); 2231 2232 if (offset + rest < PAGE_SIZE) 2233 length = rest; 2234 else 2235 length = PAGE_SIZE - offset; 2236 pd->req_count = cpu_to_le16(length); 2237 pd->res_count = pd->req_count; 2238 pd->transfer_status = 0; 2239 2240 page_bus = page_private(buffer->pages[page]); 2241 pd->data_address = cpu_to_le32(page_bus + offset); 2242 2243 offset = (offset + length) & ~PAGE_MASK; 2244 rest -= length; 2245 if (offset == 0) 2246 page++; 2247 } 2248 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 2249 DESCRIPTOR_INPUT_LAST | 2250 DESCRIPTOR_BRANCH_ALWAYS); 2251 if (p->interrupt && i == packet_count - 1) 2252 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 2253 2254 context_append(&ctx->context, d, z, header_z); 2255 } 2256 2257 return 0; 2258 } 2259 2260 static int ohci_queue_iso(struct fw_iso_context *base, 2261 struct fw_iso_packet *packet, 2262 struct fw_iso_buffer *buffer, 2263 unsigned long payload) 2264 { 2265 struct iso_context *ctx = container_of(base, struct iso_context, base); 2266 unsigned long flags; 2267 int ret; 2268 2269 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2270 if (base->type == FW_ISO_CONTEXT_TRANSMIT) 2271 ret = ohci_queue_iso_transmit(base, packet, buffer, payload); 2272 else 2273 ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, 2274 buffer, payload); 2275 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); 2276 2277 return ret; 2278 } 2279 2280 static const struct fw_card_driver ohci_driver = { 2281 .enable = ohci_enable, 2282 .update_phy_reg = ohci_update_phy_reg, 2283 .set_config_rom = ohci_set_config_rom, 2284 .send_request = ohci_send_request, 2285 .send_response = ohci_send_response, 2286 .cancel_packet = ohci_cancel_packet, 2287 .enable_phys_dma = ohci_enable_phys_dma, 2288 .get_cycle_time = ohci_get_cycle_time, 2289 2290 .allocate_iso_context = ohci_allocate_iso_context, 2291 .free_iso_context = ohci_free_iso_context, 2292 .queue_iso = ohci_queue_iso, 2293 .start_iso = ohci_start_iso, 2294 .stop_iso = ohci_stop_iso, 2295 }; 2296 2297 #ifdef CONFIG_PPC_PMAC 2298 static void ohci_pmac_on(struct pci_dev *dev) 2299 { 2300 if (machine_is(powermac)) { 2301 struct device_node *ofn = pci_device_to_OF_node(dev); 2302 2303 if (ofn) { 2304 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); 2305 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); 2306 } 2307 } 2308 } 2309 2310 static void ohci_pmac_off(struct pci_dev *dev) 2311 { 2312 if (machine_is(powermac)) { 2313 struct device_node *ofn = pci_device_to_OF_node(dev); 2314 2315 if (ofn) { 2316 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); 2317 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); 2318 } 2319 } 2320 } 2321 #else 2322 #define ohci_pmac_on(dev) 2323 #define ohci_pmac_off(dev) 2324 #endif /* CONFIG_PPC_PMAC */ 2325 2326 static int __devinit pci_probe(struct pci_dev *dev, 2327 const struct pci_device_id *ent) 2328 { 2329 struct fw_ohci *ohci; 2330 u32 bus_options, max_receive, link_speed, version; 2331 u64 guid; 2332 int i, err, n_ir, n_it; 2333 size_t size; 2334 2335 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); 2336 if (ohci == NULL) { 2337 err = -ENOMEM; 2338 goto fail; 2339 } 2340 2341 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); 2342 2343 ohci_pmac_on(dev); 2344 2345 err = pci_enable_device(dev); 2346 if (err) { 2347 fw_error("Failed to enable OHCI hardware\n"); 2348 goto fail_free; 2349 } 2350 2351 pci_set_master(dev); 2352 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); 2353 pci_set_drvdata(dev, ohci); 2354 2355 spin_lock_init(&ohci->lock); 2356 2357 tasklet_init(&ohci->bus_reset_tasklet, 2358 bus_reset_tasklet, (unsigned long)ohci); 2359 2360 err = pci_request_region(dev, 0, ohci_driver_name); 2361 if (err) { 2362 fw_error("MMIO resource unavailable\n"); 2363 goto fail_disable; 2364 } 2365 2366 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); 2367 if (ohci->registers == NULL) { 2368 fw_error("Failed to remap registers\n"); 2369 err = -ENXIO; 2370 goto fail_iomem; 2371 } 2372 2373 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) 2374 if (ohci_quirks[i].vendor == dev->vendor && 2375 (ohci_quirks[i].device == dev->device || 2376 ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) { 2377 ohci->quirks = ohci_quirks[i].flags; 2378 break; 2379 } 2380 if (param_quirks) 2381 ohci->quirks = param_quirks; 2382 2383 ar_context_init(&ohci->ar_request_ctx, ohci, 2384 OHCI1394_AsReqRcvContextControlSet); 2385 2386 ar_context_init(&ohci->ar_response_ctx, ohci, 2387 OHCI1394_AsRspRcvContextControlSet); 2388 2389 context_init(&ohci->at_request_ctx, ohci, 2390 OHCI1394_AsReqTrContextControlSet, handle_at_packet); 2391 2392 context_init(&ohci->at_response_ctx, ohci, 2393 OHCI1394_AsRspTrContextControlSet, handle_at_packet); 2394 2395 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); 2396 ohci->ir_context_channels = ~0ULL; 2397 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); 2398 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); 2399 n_ir = hweight32(ohci->ir_context_mask); 2400 size = sizeof(struct iso_context) * n_ir; 2401 ohci->ir_context_list = kzalloc(size, GFP_KERNEL); 2402 2403 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); 2404 ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); 2405 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); 2406 n_it = hweight32(ohci->it_context_mask); 2407 size = sizeof(struct iso_context) * n_it; 2408 ohci->it_context_list = kzalloc(size, GFP_KERNEL); 2409 2410 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { 2411 err = -ENOMEM; 2412 goto fail_contexts; 2413 } 2414 2415 /* self-id dma buffer allocation */ 2416 ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device, 2417 SELF_ID_BUF_SIZE, 2418 &ohci->self_id_bus, 2419 GFP_KERNEL); 2420 if (ohci->self_id_cpu == NULL) { 2421 err = -ENOMEM; 2422 goto fail_contexts; 2423 } 2424 2425 bus_options = reg_read(ohci, OHCI1394_BusOptions); 2426 max_receive = (bus_options >> 12) & 0xf; 2427 link_speed = bus_options & 0x7; 2428 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | 2429 reg_read(ohci, OHCI1394_GUIDLo); 2430 2431 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); 2432 if (err) 2433 goto fail_self_id; 2434 2435 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2436 fw_notify("Added fw-ohci device %s, OHCI v%x.%x, " 2437 "%d IR + %d IT contexts, quirks 0x%x\n", 2438 dev_name(&dev->dev), version >> 16, version & 0xff, 2439 n_ir, n_it, ohci->quirks); 2440 2441 return 0; 2442 2443 fail_self_id: 2444 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, 2445 ohci->self_id_cpu, ohci->self_id_bus); 2446 fail_contexts: 2447 kfree(ohci->ir_context_list); 2448 kfree(ohci->it_context_list); 2449 context_release(&ohci->at_response_ctx); 2450 context_release(&ohci->at_request_ctx); 2451 ar_context_release(&ohci->ar_response_ctx); 2452 ar_context_release(&ohci->ar_request_ctx); 2453 pci_iounmap(dev, ohci->registers); 2454 fail_iomem: 2455 pci_release_region(dev, 0); 2456 fail_disable: 2457 pci_disable_device(dev); 2458 fail_free: 2459 kfree(&ohci->card); 2460 ohci_pmac_off(dev); 2461 fail: 2462 if (err == -ENOMEM) 2463 fw_error("Out of memory\n"); 2464 2465 return err; 2466 } 2467 2468 static void pci_remove(struct pci_dev *dev) 2469 { 2470 struct fw_ohci *ohci; 2471 2472 ohci = pci_get_drvdata(dev); 2473 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 2474 flush_writes(ohci); 2475 fw_core_remove_card(&ohci->card); 2476 2477 /* 2478 * FIXME: Fail all pending packets here, now that the upper 2479 * layers can't queue any more. 2480 */ 2481 2482 software_reset(ohci); 2483 free_irq(dev->irq, ohci); 2484 2485 if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) 2486 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2487 ohci->next_config_rom, ohci->next_config_rom_bus); 2488 if (ohci->config_rom) 2489 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2490 ohci->config_rom, ohci->config_rom_bus); 2491 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, 2492 ohci->self_id_cpu, ohci->self_id_bus); 2493 ar_context_release(&ohci->ar_request_ctx); 2494 ar_context_release(&ohci->ar_response_ctx); 2495 context_release(&ohci->at_request_ctx); 2496 context_release(&ohci->at_response_ctx); 2497 kfree(ohci->it_context_list); 2498 kfree(ohci->ir_context_list); 2499 pci_iounmap(dev, ohci->registers); 2500 pci_release_region(dev, 0); 2501 pci_disable_device(dev); 2502 kfree(&ohci->card); 2503 ohci_pmac_off(dev); 2504 2505 fw_notify("Removed fw-ohci device.\n"); 2506 } 2507 2508 #ifdef CONFIG_PM 2509 static int pci_suspend(struct pci_dev *dev, pm_message_t state) 2510 { 2511 struct fw_ohci *ohci = pci_get_drvdata(dev); 2512 int err; 2513 2514 software_reset(ohci); 2515 free_irq(dev->irq, ohci); 2516 err = pci_save_state(dev); 2517 if (err) { 2518 fw_error("pci_save_state failed\n"); 2519 return err; 2520 } 2521 err = pci_set_power_state(dev, pci_choose_state(dev, state)); 2522 if (err) 2523 fw_error("pci_set_power_state failed with %d\n", err); 2524 ohci_pmac_off(dev); 2525 2526 return 0; 2527 } 2528 2529 static int pci_resume(struct pci_dev *dev) 2530 { 2531 struct fw_ohci *ohci = pci_get_drvdata(dev); 2532 int err; 2533 2534 ohci_pmac_on(dev); 2535 pci_set_power_state(dev, PCI_D0); 2536 pci_restore_state(dev); 2537 err = pci_enable_device(dev); 2538 if (err) { 2539 fw_error("pci_enable_device failed\n"); 2540 return err; 2541 } 2542 2543 return ohci_enable(&ohci->card, NULL, 0); 2544 } 2545 #endif 2546 2547 static const struct pci_device_id pci_table[] = { 2548 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, 2549 { } 2550 }; 2551 2552 MODULE_DEVICE_TABLE(pci, pci_table); 2553 2554 static struct pci_driver fw_ohci_pci_driver = { 2555 .name = ohci_driver_name, 2556 .id_table = pci_table, 2557 .probe = pci_probe, 2558 .remove = pci_remove, 2559 #ifdef CONFIG_PM 2560 .resume = pci_resume, 2561 .suspend = pci_suspend, 2562 #endif 2563 }; 2564 2565 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 2566 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); 2567 MODULE_LICENSE("GPL"); 2568 2569 /* Provide a module alias so root-on-sbp2 initrds don't break. */ 2570 #ifndef CONFIG_IEEE1394_OHCI1394_MODULE 2571 MODULE_ALIAS("ohci1394"); 2572 #endif 2573 2574 static int __init fw_ohci_init(void) 2575 { 2576 return pci_register_driver(&fw_ohci_pci_driver); 2577 } 2578 2579 static void __exit fw_ohci_cleanup(void) 2580 { 2581 pci_unregister_driver(&fw_ohci_pci_driver); 2582 } 2583 2584 module_init(fw_ohci_init); 2585 module_exit(fw_ohci_cleanup); 2586