/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/page.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"

#define ohci_info(ohci, f, args...)	dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

#define DESCRIPTOR_CMD			(0xf << 12)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
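
/*
 * Worked example of the sizing above, assuming 4 KiB pages: the 32 KiB
 * AR buffer needs AR_BUFFERS = 8 pages, and the largest AR packet
 * (16-byte header + 4096-byte payload + 4-byte status trailer = 4116
 * bytes) spans at most AR_WRAPAROUND_PAGES = 2 pages, which is how many
 * pages get mapped a second time behind the end of the ring.
 */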

struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	u32 current_bus;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor block in the DMA program.  It contains the
	 * branch address that must be updated upon appending a new
	 * descriptor.
	 */
	struct descriptor *prev;
	int prev_z;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
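
/*
 * Illustrative packing of the two immediate IT header quadlets (values
 * are made up): for a stream packet on channel 13, tag 1, sync 0:
 *
 *	header[0] = IT_HEADER_SPEED(SCODE_400) | IT_HEADER_TAG(1) |
 *		    IT_HEADER_CHANNEL(13) |
 *		    IT_HEADER_TCODE(TCODE_STREAM_DATA) | IT_HEADER_SY(0);
 *	header[1] = IT_HEADER_DATA_LENGTH(payload_length);
 */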

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
	unsigned long flushing_completions;
	u32 mc_buffer_bus;
	u16 mc_completed;
	u16 last_timestamp;
	u8 sync;
	u8 tags;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool bus_time_running;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;     /* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels; /* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;     /* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels; /* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id;
	dma_addr_t self_id_bus;
	struct work_struct bus_reset_work;

	u32 self_id_buffer[512];
};

static struct workqueue_struct *selfid_workqueue;

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_DEVICE_ID_VIA_VT630X	0x3044
#define PCI_REV_ID_VIA_VT6306		0x46
#define PCI_DEVICE_ID_VIA_VT6315	0x3403

#define QUIRK_CYCLE_TIMER		0x1
#define QUIRK_RESET_PACKET		0x2
#define QUIRK_BE_HEADERS		0x4
#define QUIRK_NO_1394A			0x8
#define QUIRK_NO_MSI			0x10
#define QUIRK_TI_SLLZ059		0x20
#define QUIRK_IR_WAKE			0x40

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
	")");

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");

static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
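
/*
 * Example (illustrative): both parameters take a bitwise OR of the flag
 * values above, so e.g.
 *
 *	modprobe firewire-ohci quirks=0x11 debug=3
 *
 * would force QUIRK_CYCLE_TIMER | QUIRK_NO_MSI on all controllers and
 * enable AT/AR event and self-ID logging.
 */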
" AR_resp" : "", 393 evt & OHCI1394_reqTxComplete ? " AT_req" : "", 394 evt & OHCI1394_respTxComplete ? " AT_resp" : "", 395 evt & OHCI1394_isochRx ? " IR" : "", 396 evt & OHCI1394_isochTx ? " IT" : "", 397 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 398 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 399 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", 400 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", 401 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 402 evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "", 403 evt & OHCI1394_busReset ? " busReset" : "", 404 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | 405 OHCI1394_RSPkt | OHCI1394_reqTxComplete | 406 OHCI1394_respTxComplete | OHCI1394_isochRx | 407 OHCI1394_isochTx | OHCI1394_postedWriteErr | 408 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | 409 OHCI1394_cycleInconsistent | 410 OHCI1394_regAccessFail | OHCI1394_busReset) 411 ? " ?" : ""); 412 } 413 414 static const char *speed[] = { 415 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta", 416 }; 417 static const char *power[] = { 418 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W", 419 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", 420 }; 421 static const char port[] = { '.', '-', 'p', 'c', }; 422 423 static char _p(u32 *s, int shift) 424 { 425 return port[*s >> shift & 3]; 426 } 427 428 static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count) 429 { 430 u32 *s; 431 432 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) 433 return; 434 435 ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n", 436 self_id_count, generation, ohci->node_id); 437 438 for (s = ohci->self_id_buffer; self_id_count--; ++s) 439 if ((*s & 1 << 23) == 0) 440 ohci_notice(ohci, 441 "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n", 442 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), 443 speed[*s >> 14 & 3], *s >> 16 & 63, 444 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", 445 *s >> 11 & 1 ? "c" : "", *s & 2 ? 
"i" : ""); 446 else 447 ohci_notice(ohci, 448 "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", 449 *s, *s >> 24 & 63, 450 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), 451 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); 452 } 453 454 static const char *evts[] = { 455 [0x00] = "evt_no_status", [0x01] = "-reserved-", 456 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", 457 [0x04] = "evt_underrun", [0x05] = "evt_overrun", 458 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", 459 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", 460 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", 461 [0x0c] = "-reserved-", [0x0d] = "-reserved-", 462 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", 463 [0x10] = "-reserved-", [0x11] = "ack_complete", 464 [0x12] = "ack_pending ", [0x13] = "-reserved-", 465 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", 466 [0x16] = "ack_busy_B", [0x17] = "-reserved-", 467 [0x18] = "-reserved-", [0x19] = "-reserved-", 468 [0x1a] = "-reserved-", [0x1b] = "ack_tardy", 469 [0x1c] = "-reserved-", [0x1d] = "ack_data_error", 470 [0x1e] = "ack_type_error", [0x1f] = "-reserved-", 471 [0x20] = "pending/cancelled", 472 }; 473 static const char *tcodes[] = { 474 [0x0] = "QW req", [0x1] = "BW req", 475 [0x2] = "W resp", [0x3] = "-reserved-", 476 [0x4] = "QR req", [0x5] = "BR req", 477 [0x6] = "QR resp", [0x7] = "BR resp", 478 [0x8] = "cycle start", [0x9] = "Lk req", 479 [0xa] = "async stream packet", [0xb] = "Lk resp", 480 [0xc] = "-reserved-", [0xd] = "-reserved-", 481 [0xe] = "link internal", [0xf] = "-reserved-", 482 }; 483 484 static void log_ar_at_event(struct fw_ohci *ohci, 485 char dir, int speed, u32 *header, int evt) 486 { 487 int tcode = header[0] >> 4 & 0xf; 488 char specific[12]; 489 490 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) 491 return; 492 493 if (unlikely(evt >= ARRAY_SIZE(evts))) 494 evt = 0x1f; 495 496 if (evt == OHCI1394_evt_bus_reset) { 497 ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n", 498 dir, (header[2] >> 16) & 0xff); 499 return; 500 } 501 502 switch (tcode) { 503 case 0x0: case 0x6: case 0x8: 504 snprintf(specific, sizeof(specific), " = %08x", 505 be32_to_cpu((__force __be32)header[3])); 506 break; 507 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: 508 snprintf(specific, sizeof(specific), " %x,%x", 509 header[3] >> 16, header[3] & 0xffff); 510 break; 511 default: 512 specific[0] = '\0'; 513 } 514 515 switch (tcode) { 516 case 0xa: 517 ohci_notice(ohci, "A%c %s, %s\n", 518 dir, evts[evt], tcodes[tcode]); 519 break; 520 case 0xe: 521 ohci_notice(ohci, "A%c %s, PHY %08x %08x\n", 522 dir, evts[evt], header[1], header[2]); 523 break; 524 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: 525 ohci_notice(ohci, 526 "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", 527 dir, speed, header[0] >> 10 & 0x3f, 528 header[1] >> 16, header[0] >> 16, evts[evt], 529 tcodes[tcode], header[1] & 0xffff, header[2], specific); 530 break; 531 default: 532 ohci_notice(ohci, 533 "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", 534 dir, speed, header[0] >> 10 & 0x3f, 535 header[1] >> 16, header[0] >> 16, evts[evt], 536 tcodes[tcode], specific); 537 } 538 } 539 540 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) 541 { 542 writel(data, ohci->registers + offset); 543 } 544 545 static inline u32 reg_read(const struct fw_ohci *ohci, int offset) 546 { 547 return readl(ohci->registers + offset); 548 } 549 550 static inline void flush_writes(const struct fw_ohci *ohci) 551 { 552 /* Do a dummy read to flush writes. 

/*
 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);
	dump_stack();

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
	dump_stack();

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}
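
/*
 * Usage sketch (illustrative, assuming the PHY_* bit definitions from
 * ohci.h): to set the LCtrl and C bits of PHY register 4 without
 * disturbing the remaining bits, a caller holding phy_reg_mutex would do
 *
 *	update_phy_reg(ohci, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
 */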

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
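
	/*
	 * Raising the previously-last descriptor's Z field (the low nibble
	 * of branch_address) from 0 to 1 is what hands the re-linked page
	 * back to the controller; a Z of 0 marks the end of the program.
	 */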
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static void ar_context_release(struct ar_context *ctx)
{
	unsigned int i;

	vunmap(ctx->buffer);

	for (i = 0; i < AR_BUFFERS; i++)
		if (ctx->pages[i]) {
			dma_unmap_page(ctx->ohci->card.device,
				       ar_buffer_bus(ctx, i),
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(ctx->pages[i]);
		}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	struct fw_ohci *ohci = ctx->ohci;

	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ohci);

		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = ACCESS_ONCE(
				ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might never be updated by the
			 * controller and still look empty, so we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = ACCESS_ONCE(
					ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt = (status >> 16) & 0x1f;
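
	/*
	 * The OHCI event code doubles as the ack code: values 0x11..0x1e
	 * are the IEEE 1394 ack codes plus 0x10 (e.g. 0x11 = ack_complete
	 * in evts[] above), so subtracting 16 yields the ACK_* constants.
	 */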
	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status &&
	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_work).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here.  If the last packet
		 * wraps around, its tail will be visible after the buffer end
		 * because the buffer start pages are mapped there again.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;

error:
	ctx->pointer = NULL;
}

static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	for (i = 0; i < AR_BUFFERS; i++) {
		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
		if (!ctx->pages[i])
			goto out_of_memory;
		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(ohci->card.device, dma_addr)) {
			__free_page(ctx->pages[i]);
			ctx->pages[i] = NULL;
			goto out_of_memory;
		}
		set_page_private(ctx->pages[i], dma_addr);
	}
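
	/*
	 * Map the ring of AR pages virtually contiguously, and map the
	 * first AR_WRAPAROUND_PAGES of them a second time behind the end
	 * of the ring, so that a packet which wraps past the last page is
	 * still readable as one contiguous span.
	 */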
	for (i = 0; i < AR_BUFFERS; i++)
		pages[i] = ctx->pages[i];
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count      = cpu_to_le16(PAGE_SIZE);
		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						DESCRIPTOR_STATUS |
						DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}

static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
}
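
/*
 * In a descriptor block of z descriptors, the branch address normally
 * lives in the last descriptor (d + z - 1).  The one exception handled
 * below is a z == 2 block whose first descriptor already has
 * BRANCH_ALWAYS set (e.g. an immediate-only AT packet), where the first
 * descriptor carries the branch itself.
 */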
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	__le16 branch;

	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return d;
	else
		return d + z - 1;
}

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;
		ctx->current_bus = address;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;
	ctx->prev_z = 1;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
				  desc->buffer_bus -
				  ((void *)&desc->buffer - (void *)desc));
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;
	struct descriptor *d_branch;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */

	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
	d_branch->branch_address = cpu_to_le32(d_bus | z);

	/*
	 * VT6306 incorrectly checks only the single descriptor at the
	 * CommandPtr when the wake bit is written, so if it's a
	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
	 * the branch address in the first descriptor.
	 *
	 * We do not do this for transmit contexts, since we are not sure how
	 * it interacts with skip addresses.
	 */
	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
	    d_branch != ctx->prev &&
	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	}

	ctx->prev = d;
	ctx->prev_z = z;
}
1320 */ 1321 1322 tcode = (packet->header[0] >> 4) & 0x0f; 1323 header = (__le32 *) &d[1]; 1324 switch (tcode) { 1325 case TCODE_WRITE_QUADLET_REQUEST: 1326 case TCODE_WRITE_BLOCK_REQUEST: 1327 case TCODE_WRITE_RESPONSE: 1328 case TCODE_READ_QUADLET_REQUEST: 1329 case TCODE_READ_BLOCK_REQUEST: 1330 case TCODE_READ_QUADLET_RESPONSE: 1331 case TCODE_READ_BLOCK_RESPONSE: 1332 case TCODE_LOCK_REQUEST: 1333 case TCODE_LOCK_RESPONSE: 1334 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 1335 (packet->speed << 16)); 1336 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | 1337 (packet->header[0] & 0xffff0000)); 1338 header[2] = cpu_to_le32(packet->header[2]); 1339 1340 if (TCODE_IS_BLOCK_PACKET(tcode)) 1341 header[3] = cpu_to_le32(packet->header[3]); 1342 else 1343 header[3] = (__force __le32) packet->header[3]; 1344 1345 d[0].req_count = cpu_to_le16(packet->header_length); 1346 break; 1347 1348 case TCODE_LINK_INTERNAL: 1349 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | 1350 (packet->speed << 16)); 1351 header[1] = cpu_to_le32(packet->header[1]); 1352 header[2] = cpu_to_le32(packet->header[2]); 1353 d[0].req_count = cpu_to_le16(12); 1354 1355 if (is_ping_packet(&packet->header[1])) 1356 d[0].control |= cpu_to_le16(DESCRIPTOR_PING); 1357 break; 1358 1359 case TCODE_STREAM_DATA: 1360 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 1361 (packet->speed << 16)); 1362 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); 1363 d[0].req_count = cpu_to_le16(8); 1364 break; 1365 1366 default: 1367 /* BUG(); */ 1368 packet->ack = RCODE_SEND_ERROR; 1369 return -1; 1370 } 1371 1372 BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor)); 1373 driver_data = (struct driver_data *) &d[3]; 1374 driver_data->packet = packet; 1375 packet->driver_data = driver_data; 1376 1377 if (packet->payload_length > 0) { 1378 if (packet->payload_length > sizeof(driver_data->inline_data)) { 1379 payload_bus = dma_map_single(ohci->card.device, 1380 packet->payload, 1381 packet->payload_length, 1382 DMA_TO_DEVICE); 1383 if (dma_mapping_error(ohci->card.device, payload_bus)) { 1384 packet->ack = RCODE_SEND_ERROR; 1385 return -1; 1386 } 1387 packet->payload_bus = payload_bus; 1388 packet->payload_mapped = true; 1389 } else { 1390 memcpy(driver_data->inline_data, packet->payload, 1391 packet->payload_length); 1392 payload_bus = d_bus + 3 * sizeof(*d); 1393 } 1394 1395 d[2].req_count = cpu_to_le16(packet->payload_length); 1396 d[2].data_address = cpu_to_le32(payload_bus); 1397 last = &d[2]; 1398 z = 3; 1399 } else { 1400 last = &d[0]; 1401 z = 2; 1402 } 1403 1404 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 1405 DESCRIPTOR_IRQ_ALWAYS | 1406 DESCRIPTOR_BRANCH_ALWAYS); 1407 1408 /* FIXME: Document how the locking works. 
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	if (ctx->running)
		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	else
		context_run(ctx, 0);

	return 0;
}

static void at_context_flush(struct context *ctx)
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;
	context_tasklet((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0 && !context->flushing)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; this should give the same error as
		 * when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (context->flushing)
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
1487 */ 1488 packet->ack = RCODE_NO_ACK; 1489 } 1490 break; 1491 1492 case ACK_COMPLETE + 0x10: 1493 case ACK_PENDING + 0x10: 1494 case ACK_BUSY_X + 0x10: 1495 case ACK_BUSY_A + 0x10: 1496 case ACK_BUSY_B + 0x10: 1497 case ACK_DATA_ERROR + 0x10: 1498 case ACK_TYPE_ERROR + 0x10: 1499 packet->ack = evt - 0x10; 1500 break; 1501 1502 case OHCI1394_evt_no_status: 1503 if (context->flushing) { 1504 packet->ack = RCODE_GENERATION; 1505 break; 1506 } 1507 /* fall through */ 1508 1509 default: 1510 packet->ack = RCODE_SEND_ERROR; 1511 break; 1512 } 1513 1514 packet->callback(packet, &ohci->card, packet->ack); 1515 1516 return 1; 1517 } 1518 1519 #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) 1520 #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) 1521 #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) 1522 #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) 1523 #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) 1524 1525 static void handle_local_rom(struct fw_ohci *ohci, 1526 struct fw_packet *packet, u32 csr) 1527 { 1528 struct fw_packet response; 1529 int tcode, length, i; 1530 1531 tcode = HEADER_GET_TCODE(packet->header[0]); 1532 if (TCODE_IS_BLOCK_PACKET(tcode)) 1533 length = HEADER_GET_DATA_LENGTH(packet->header[3]); 1534 else 1535 length = 4; 1536 1537 i = csr - CSR_CONFIG_ROM; 1538 if (i + length > CONFIG_ROM_SIZE) { 1539 fw_fill_response(&response, packet->header, 1540 RCODE_ADDRESS_ERROR, NULL, 0); 1541 } else if (!TCODE_IS_READ_REQUEST(tcode)) { 1542 fw_fill_response(&response, packet->header, 1543 RCODE_TYPE_ERROR, NULL, 0); 1544 } else { 1545 fw_fill_response(&response, packet->header, RCODE_COMPLETE, 1546 (void *) ohci->config_rom + i, length); 1547 } 1548 1549 fw_core_handle_response(&ohci->card, &response); 1550 } 1551 1552 static void handle_local_lock(struct fw_ohci *ohci, 1553 struct fw_packet *packet, u32 csr) 1554 { 1555 struct fw_packet response; 1556 int tcode, length, ext_tcode, sel, try; 1557 __be32 *payload, lock_old; 1558 u32 lock_arg, lock_data; 1559 1560 tcode = HEADER_GET_TCODE(packet->header[0]); 1561 length = HEADER_GET_DATA_LENGTH(packet->header[3]); 1562 payload = packet->payload; 1563 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); 1564 1565 if (tcode == TCODE_LOCK_REQUEST && 1566 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { 1567 lock_arg = be32_to_cpu(payload[0]); 1568 lock_data = be32_to_cpu(payload[1]); 1569 } else if (tcode == TCODE_READ_QUADLET_REQUEST) { 1570 lock_arg = 0; 1571 lock_data = 0; 1572 } else { 1573 fw_fill_response(&response, packet->header, 1574 RCODE_TYPE_ERROR, NULL, 0); 1575 goto out; 1576 } 1577 1578 sel = (csr - CSR_BUS_MANAGER_ID) / 4; 1579 reg_write(ohci, OHCI1394_CSRData, lock_data); 1580 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); 1581 reg_write(ohci, OHCI1394_CSRControl, sel); 1582 1583 for (try = 0; try < 20; try++) 1584 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { 1585 lock_old = cpu_to_be32(reg_read(ohci, 1586 OHCI1394_CSRData)); 1587 fw_fill_response(&response, packet->header, 1588 RCODE_COMPLETE, 1589 &lock_old, sizeof(lock_old)); 1590 goto out; 1591 } 1592 1593 ohci_err(ohci, "swap not done (CSR lock timeout)\n"); 1594 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); 1595 1596 out: 1597 fw_core_handle_response(&ohci->card, &response); 1598 } 1599 1600 static void handle_local_request(struct context *ctx, struct fw_packet *packet) 1601 { 1602 u64 offset, csr; 1603 1604 if (ctx == &ctx->ohci->at_request_ctx) { 1605 packet->ack = ACK_PENDING; 1606 

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset, csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}

static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD)
		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
			 name, evts[ctl & 0x1f]);
}

static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}

static u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks  = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}
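
/*
 * Worked example: the cycle timer register packs cycleSeconds (7 bits),
 * cycleCount (13 bits, 8000 cycles per second) and cycleOffset (12 bits,
 * 3072 ticks of the 24.576 MHz clock per 125 us cycle).  A register
 * value with cycleCount = 1 and cycleOffset = 0 therefore converts to
 * 3072 ticks, i.e. 125 us.
 */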

/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same,
 * i.e., each difference is less than twice the other.  Furthermore, any
 * negative difference indicates an error.  (A PCI read should take at least
 * 20 ticks of the 24.576 MHz timer to execute, so we have enough precision
 * to compute the ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}

/*
 * This function has to be called at least every 64 seconds.  The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if (unlikely(!ohci->bus_time_running)) {
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
		ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
				 (cycle_time_seconds & 0x40);
		ohci->bus_time_running = true;
	}

	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}

static int get_status_for_port(struct fw_ohci *ohci, int port_index)
{
	int reg;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, port_index);
	if (reg >= 0)
		reg = read_phy_reg(ohci, 8);
	mutex_unlock(&ohci->phy_reg_mutex);
	if (reg < 0)
		return reg;

	switch (reg & 0x0f) {
	case 0x06:
		return 2;	/* is child node (connected to parent node) */
	case 0x0e:
		return 3;	/* is parent node (connected to child node) */
	}
	return 1;		/* not connected */
}

static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
			   int self_id_count)
{
	int i;
	u32 entry;

	for (i = 0; i < self_id_count; i++) {
		entry = ohci->self_id_buffer[i];
		if ((self_id & 0xff000000) == (entry & 0xff000000))
			return -1;
		if ((self_id & 0xff000000) < (entry & 0xff000000))
			return i;
	}
	return i;
}

static int initiated_reset(struct fw_ohci *ohci)
{
	int reg;
	int ret = 0;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
	if (reg >= 0) {
		reg = read_phy_reg(ohci, 8);
		reg |= 0x40;
		reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
		if (reg >= 0) {
			reg = read_phy_reg(ohci, 12); /* read register 12 */
			if (reg >= 0) {
				if ((reg & 0x08) == 0x08) {
					/* bit 3 indicates "initiated reset" */
					ret = 0x2;
				}
			}
		}
	}
	mutex_unlock(&ohci->phy_reg_mutex);
	return ret;
}

/*
 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
 * Construct the selfID from phy register contents.
 */
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
	int reg, i, pos, status;
	/* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
	u32 self_id = 0x8040c800;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return -EBUSY;
	}
	self_id |= ((reg & 0x3f) << 24); /* phy ID */

	reg = ohci_read_phy_reg(&ohci->card, 4);
	if (reg < 0)
		return reg;
	self_id |= ((reg & 0x07) << 8); /* power class */

	reg = ohci_read_phy_reg(&ohci->card, 1);
	if (reg < 0)
		return reg;
	self_id |= ((reg & 0x3f) << 16); /* gap count */

	for (i = 0; i < 3; i++) {
		status = get_status_for_port(ohci, i);
		if (status < 0)
			return status;
		self_id |= ((status & 0x3) << (6 - (i * 2)));
	}

	self_id |= initiated_reset(ohci);

	pos = get_self_id_pos(ohci, self_id, self_id_count);
	if (pos >= 0) {
		memmove(&(ohci->self_id_buffer[pos+1]),
			&(ohci->self_id_buffer[pos]),
			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
		ohci->self_id_buffer[pos] = self_id;
		self_id_count++;
	}
	return self_id_count;
}

static void bus_reset_work(struct work_struct *work)
{
	struct fw_ohci *ohci =
		container_of(work, struct fw_ohci, bus_reset_work);
	int self_id_count, generation, new_generation, i, j;
	u32 reg;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		ohci_notice(ohci, "malconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
	if (!(ohci->is_root && is_new_root))
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	ohci->is_root = is_new_root;

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		ohci_notice(ohci, "self ID receive error\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg >> 3) & 0xff;

	if (self_id_count > 252) {
		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
		return;
	}

	generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		u32 id  = cond_le32_to_cpu(ohci->self_id[i]);
		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);

		if (id != ~id2) {
			/*
			 * If the invalid data looks like a cycle start packet,
			 * it's likely to be the result of the cycle master
In this case, the self IDs 1945 * so far are valid and should be processed so that the 1946 * bus manager can then correct the gap count. 1947 */ 1948 if (id == 0xffff008f) { 1949 ohci_notice(ohci, "ignoring spurious self IDs\n"); 1950 self_id_count = j; 1951 break; 1952 } 1953 1954 ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n", 1955 j, self_id_count, id, id2); 1956 return; 1957 } 1958 ohci->self_id_buffer[j] = id; 1959 } 1960 1961 if (ohci->quirks & QUIRK_TI_SLLZ059) { 1962 self_id_count = find_and_insert_self_id(ohci, self_id_count); 1963 if (self_id_count < 0) { 1964 ohci_notice(ohci, 1965 "could not construct local self ID\n"); 1966 return; 1967 } 1968 } 1969 1970 if (self_id_count == 0) { 1971 ohci_notice(ohci, "no self IDs\n"); 1972 return; 1973 } 1974 rmb(); 1975 1976 /* 1977 * Check the consistency of the self IDs we just read. The 1978 * problem we face is that a new bus reset can start while we 1979 * read out the self IDs from the DMA buffer. If this happens, 1980 * the DMA buffer will be overwritten with new self IDs and we 1981 * will read out inconsistent data. The OHCI specification 1982 * (section 11.2) recommends a technique similar to 1983 * linux/seqlock.h, where we remember the generation of the 1984 * self IDs in the buffer before reading them out and compare 1985 * it to the current generation after reading them out. If 1986 * the two generations match we know we have a consistent set 1987 * of self IDs. 1988 */ 1989 1990 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; 1991 if (new_generation != generation) { 1992 ohci_notice(ohci, "new bus reset, discarding self ids\n"); 1993 return; 1994 } 1995 1996 /* FIXME: Document how the locking works. */ 1997 spin_lock_irq(&ohci->lock); 1998 1999 ohci->generation = -1; /* prevent AT packet queueing */ 2000 context_stop(&ohci->at_request_ctx); 2001 context_stop(&ohci->at_response_ctx); 2002 2003 spin_unlock_irq(&ohci->lock); 2004 2005 /* 2006 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent 2007 * packets in the AT queues and software needs to drain them. 2008 * Some OHCI 1.1 controllers (JMicron) apparently require this too. 2009 */ 2010 at_context_flush(&ohci->at_request_ctx); 2011 at_context_flush(&ohci->at_response_ctx); 2012 2013 spin_lock_irq(&ohci->lock); 2014 2015 ohci->generation = generation; 2016 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 2017 2018 if (ohci->quirks & QUIRK_RESET_PACKET) 2019 ohci->request_generation = generation; 2020 2021 /* 2022 * This next bit is unrelated to the AT context stuff but we 2023 * have to do it under the spinlock also. If a new config rom 2024 * was set up before this reset, the old one is now no longer 2025 * in use and we can free it. Update the config rom pointers 2026 * to point to the current config rom and clear the 2027 * next_config_rom pointer so a new update can take place. 2028 */ 2029 2030 if (ohci->next_config_rom != NULL) { 2031 if (ohci->next_config_rom != ohci->config_rom) { 2032 free_rom = ohci->config_rom; 2033 free_rom_bus = ohci->config_rom_bus; 2034 } 2035 ohci->config_rom = ohci->next_config_rom; 2036 ohci->config_rom_bus = ohci->next_config_rom_bus; 2037 ohci->next_config_rom = NULL; 2038 2039 /* 2040 * Restore config_rom image and manually update 2041 * config_rom registers. Writing the header quadlet 2042 * will indicate that the config rom is ready, so we 2043 * do that last. 
2044 */ 2045 reg_write(ohci, OHCI1394_BusOptions, 2046 be32_to_cpu(ohci->config_rom[2])); 2047 ohci->config_rom[0] = ohci->next_header; 2048 reg_write(ohci, OHCI1394_ConfigROMhdr, 2049 be32_to_cpu(ohci->next_header)); 2050 } 2051 2052 if (param_remote_dma) { 2053 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); 2054 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); 2055 } 2056 2057 spin_unlock_irq(&ohci->lock); 2058 2059 if (free_rom) 2060 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2061 free_rom, free_rom_bus); 2062 2063 log_selfids(ohci, generation, self_id_count); 2064 2065 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, 2066 self_id_count, ohci->self_id_buffer, 2067 ohci->csr_state_setclear_abdicate); 2068 ohci->csr_state_setclear_abdicate = false; 2069 } 2070 2071 static irqreturn_t irq_handler(int irq, void *data) 2072 { 2073 struct fw_ohci *ohci = data; 2074 u32 event, iso_event; 2075 int i; 2076 2077 event = reg_read(ohci, OHCI1394_IntEventClear); 2078 2079 if (!event || !~event) 2080 return IRQ_NONE; 2081 2082 /* 2083 * busReset and postedWriteErr must not be cleared yet 2084 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) 2085 */ 2086 reg_write(ohci, OHCI1394_IntEventClear, 2087 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); 2088 log_irqs(ohci, event); 2089 2090 if (event & OHCI1394_selfIDComplete) 2091 queue_work(selfid_workqueue, &ohci->bus_reset_work); 2092 2093 if (event & OHCI1394_RQPkt) 2094 tasklet_schedule(&ohci->ar_request_ctx.tasklet); 2095 2096 if (event & OHCI1394_RSPkt) 2097 tasklet_schedule(&ohci->ar_response_ctx.tasklet); 2098 2099 if (event & OHCI1394_reqTxComplete) 2100 tasklet_schedule(&ohci->at_request_ctx.tasklet); 2101 2102 if (event & OHCI1394_respTxComplete) 2103 tasklet_schedule(&ohci->at_response_ctx.tasklet); 2104 2105 if (event & OHCI1394_isochRx) { 2106 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); 2107 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); 2108 2109 while (iso_event) { 2110 i = ffs(iso_event) - 1; 2111 tasklet_schedule( 2112 &ohci->ir_context_list[i].context.tasklet); 2113 iso_event &= ~(1 << i); 2114 } 2115 } 2116 2117 if (event & OHCI1394_isochTx) { 2118 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); 2119 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); 2120 2121 while (iso_event) { 2122 i = ffs(iso_event) - 1; 2123 tasklet_schedule( 2124 &ohci->it_context_list[i].context.tasklet); 2125 iso_event &= ~(1 << i); 2126 } 2127 } 2128 2129 if (unlikely(event & OHCI1394_regAccessFail)) 2130 ohci_err(ohci, "register access failure\n"); 2131 2132 if (unlikely(event & OHCI1394_postedWriteErr)) { 2133 reg_read(ohci, OHCI1394_PostedWriteAddressHi); 2134 reg_read(ohci, OHCI1394_PostedWriteAddressLo); 2135 reg_write(ohci, OHCI1394_IntEventClear, 2136 OHCI1394_postedWriteErr); 2137 if (printk_ratelimit()) 2138 ohci_err(ohci, "PCI posted write error\n"); 2139 } 2140 2141 if (unlikely(event & OHCI1394_cycleTooLong)) { 2142 if (printk_ratelimit()) 2143 ohci_notice(ohci, "isochronous cycle too long\n"); 2144 reg_write(ohci, OHCI1394_LinkControlSet, 2145 OHCI1394_LinkControl_cycleMaster); 2146 } 2147 2148 if (unlikely(event & OHCI1394_cycleInconsistent)) { 2149 /* 2150 * We need to clear this event bit in order to make 2151 * cycleMatch isochronous I/O work. In theory we should 2152 * stop active cycleMatch iso contexts now and restart 2153 * them at least two cycles later. (FIXME?) 
2154 */ 2155 if (printk_ratelimit()) 2156 ohci_notice(ohci, "isochronous cycle inconsistent\n"); 2157 } 2158 2159 if (unlikely(event & OHCI1394_unrecoverableError)) 2160 handle_dead_contexts(ohci); 2161 2162 if (event & OHCI1394_cycle64Seconds) { 2163 spin_lock(&ohci->lock); 2164 update_bus_time(ohci); 2165 spin_unlock(&ohci->lock); 2166 } else 2167 flush_writes(ohci); 2168 2169 return IRQ_HANDLED; 2170 } 2171 2172 static int software_reset(struct fw_ohci *ohci) 2173 { 2174 u32 val; 2175 int i; 2176 2177 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); 2178 for (i = 0; i < 500; i++) { 2179 val = reg_read(ohci, OHCI1394_HCControlSet); 2180 if (!~val) 2181 return -ENODEV; /* Card was ejected. */ 2182 2183 if (!(val & OHCI1394_HCControl_softReset)) 2184 return 0; 2185 2186 msleep(1); 2187 } 2188 2189 return -EBUSY; 2190 } 2191 2192 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) 2193 { 2194 size_t size = length * 4; 2195 2196 memcpy(dest, src, size); 2197 if (size < CONFIG_ROM_SIZE) 2198 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); 2199 } 2200 2201 static int configure_1394a_enhancements(struct fw_ohci *ohci) 2202 { 2203 bool enable_1394a; 2204 int ret, clear, set, offset; 2205 2206 /* Check if the driver should configure link and PHY. */ 2207 if (!(reg_read(ohci, OHCI1394_HCControlSet) & 2208 OHCI1394_HCControl_programPhyEnable)) 2209 return 0; 2210 2211 /* Paranoia: check whether the PHY supports 1394a, too. */ 2212 enable_1394a = false; 2213 ret = read_phy_reg(ohci, 2); 2214 if (ret < 0) 2215 return ret; 2216 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { 2217 ret = read_paged_phy_reg(ohci, 1, 8); 2218 if (ret < 0) 2219 return ret; 2220 if (ret >= 1) 2221 enable_1394a = true; 2222 } 2223 2224 if (ohci->quirks & QUIRK_NO_1394A) 2225 enable_1394a = false; 2226 2227 /* Configure PHY and link consistently. */ 2228 if (enable_1394a) { 2229 clear = 0; 2230 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 2231 } else { 2232 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 2233 set = 0; 2234 } 2235 ret = update_phy_reg(ohci, 5, clear, set); 2236 if (ret < 0) 2237 return ret; 2238 2239 if (enable_1394a) 2240 offset = OHCI1394_HCControlSet; 2241 else 2242 offset = OHCI1394_HCControlClear; 2243 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); 2244 2245 /* Clean up: configuration has been taken care of. */ 2246 reg_write(ohci, OHCI1394_HCControlClear, 2247 OHCI1394_HCControl_programPhyEnable); 2248 2249 return 0; 2250 } 2251 2252 static int probe_tsb41ba3d(struct fw_ohci *ohci) 2253 { 2254 /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */ 2255 static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, }; 2256 int reg, i; 2257 2258 reg = read_phy_reg(ohci, 2); 2259 if (reg < 0) 2260 return reg; 2261 if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS) 2262 return 0; 2263 2264 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) { 2265 reg = read_paged_phy_reg(ohci, 1, i + 10); 2266 if (reg < 0) 2267 return reg; 2268 if (reg != id[i]) 2269 return 0; 2270 } 2271 return 1; 2272 } 2273 2274 static int ohci_enable(struct fw_card *card, 2275 const __be32 *config_rom, size_t length) 2276 { 2277 struct fw_ohci *ohci = fw_ohci(card); 2278 u32 lps, version, irqs; 2279 int i, ret; 2280 2281 ret = software_reset(ohci); 2282 if (ret < 0) { 2283 ohci_err(ohci, "failed to reset ohci card\n"); 2284 return ret; 2285 } 2286 2287 /* 2288 * Now enable LPS, which we need in order to start accessing 2289 * most of the registers. 
In fact, on some cards (ALI M5251), 2290 * accessing registers in the SClk domain without LPS enabled 2291 * will lock up the machine. Wait 50 msec to make sure the link 2292 * is fully enabled. However, with some cards (well, at least 2293 * a JMicron PCIe card), we have to try again sometimes. 2294 * 2295 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but 2296 * cannot actually use the phy at that time. These also need a pause 2297 * of tens of milliseconds between the LPS write and the first phy access. 2298 */ 2299 2300 reg_write(ohci, OHCI1394_HCControlSet, 2301 OHCI1394_HCControl_LPS | 2302 OHCI1394_HCControl_postedWriteEnable); 2303 flush_writes(ohci); 2304 2305 for (lps = 0, i = 0; !lps && i < 3; i++) { 2306 msleep(50); 2307 lps = reg_read(ohci, OHCI1394_HCControlSet) & 2308 OHCI1394_HCControl_LPS; 2309 } 2310 2311 if (!lps) { 2312 ohci_err(ohci, "failed to set Link Power Status\n"); 2313 return -EIO; 2314 } 2315 2316 if (ohci->quirks & QUIRK_TI_SLLZ059) { 2317 ret = probe_tsb41ba3d(ohci); 2318 if (ret < 0) 2319 return ret; 2320 if (ret) 2321 ohci_notice(ohci, "local TSB41BA3D phy\n"); 2322 else 2323 ohci->quirks &= ~QUIRK_TI_SLLZ059; 2324 } 2325 2326 reg_write(ohci, OHCI1394_HCControlClear, 2327 OHCI1394_HCControl_noByteSwapData); 2328 2329 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); 2330 reg_write(ohci, OHCI1394_LinkControlSet, 2331 OHCI1394_LinkControl_cycleTimerEnable | 2332 OHCI1394_LinkControl_cycleMaster); 2333 2334 reg_write(ohci, OHCI1394_ATRetries, 2335 OHCI1394_MAX_AT_REQ_RETRIES | 2336 (OHCI1394_MAX_AT_RESP_RETRIES << 4) | 2337 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | 2338 (200 << 16)); 2339 2340 ohci->bus_time_running = false; 2341 2342 for (i = 0; i < 32; i++) 2343 if (ohci->ir_context_support & (1 << i)) 2344 reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i), 2345 IR_CONTEXT_MULTI_CHANNEL_MODE); 2346 2347 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2348 if (version >= OHCI_VERSION_1_1) { 2349 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, 2350 0xfffffffe); 2351 card->broadcast_channel_auto_allocated = true; 2352 } 2353 2354 /* Get implemented bits of the priority arbitration request counter. */ 2355 reg_write(ohci, OHCI1394_FairnessControl, 0x3f); 2356 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; 2357 reg_write(ohci, OHCI1394_FairnessControl, 0); 2358 card->priority_budget_implemented = ohci->pri_req_max != 0; 2359 2360 reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16); 2361 reg_write(ohci, OHCI1394_IntEventClear, ~0); 2362 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 2363 2364 ret = configure_1394a_enhancements(ohci); 2365 if (ret < 0) 2366 return ret; 2367 2368 /* Activate link_on bit and contender bit in our self ID packets. */ 2369 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); 2370 if (ret < 0) 2371 return ret; 2372 2373 /* 2374 * When the link is not yet enabled, the atomic config rom 2375 * update mechanism described below in ohci_set_config_rom() 2376 * is not active. We have to update ConfigRomHeader and 2377 * BusOptions manually, and the write to ConfigROMmap takes 2378 * effect immediately. We tie this to the enabling of the 2379 * link, so we have a valid config rom before enabling - the 2380 * OHCI requires that ConfigROMhdr and BusOptions have valid 2381 * values before enabling.
2382 * 2383 * However, when the ConfigROMmap is written, some controllers 2384 * always read back quadlets 0 and 2 from the config rom to 2385 * the ConfigRomHeader and BusOptions registers on bus reset. 2386 * They shouldn't do that in this initial case where the link 2387 * isn't enabled. This means we have to use the same 2388 * workaround here, setting the bus header to 0 and then write 2389 * the right values in the bus reset tasklet. 2390 */ 2391 2392 if (config_rom) { 2393 ohci->next_config_rom = 2394 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2395 &ohci->next_config_rom_bus, 2396 GFP_KERNEL); 2397 if (ohci->next_config_rom == NULL) 2398 return -ENOMEM; 2399 2400 copy_config_rom(ohci->next_config_rom, config_rom, length); 2401 } else { 2402 /* 2403 * In the suspend case, config_rom is NULL, which 2404 * means that we just reuse the old config rom. 2405 */ 2406 ohci->next_config_rom = ohci->config_rom; 2407 ohci->next_config_rom_bus = ohci->config_rom_bus; 2408 } 2409 2410 ohci->next_header = ohci->next_config_rom[0]; 2411 ohci->next_config_rom[0] = 0; 2412 reg_write(ohci, OHCI1394_ConfigROMhdr, 0); 2413 reg_write(ohci, OHCI1394_BusOptions, 2414 be32_to_cpu(ohci->next_config_rom[2])); 2415 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); 2416 2417 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); 2418 2419 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 2420 OHCI1394_RQPkt | OHCI1394_RSPkt | 2421 OHCI1394_isochTx | OHCI1394_isochRx | 2422 OHCI1394_postedWriteErr | 2423 OHCI1394_selfIDComplete | 2424 OHCI1394_regAccessFail | 2425 OHCI1394_cycleInconsistent | 2426 OHCI1394_unrecoverableError | 2427 OHCI1394_cycleTooLong | 2428 OHCI1394_masterIntEnable; 2429 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 2430 irqs |= OHCI1394_busReset; 2431 reg_write(ohci, OHCI1394_IntMaskSet, irqs); 2432 2433 reg_write(ohci, OHCI1394_HCControlSet, 2434 OHCI1394_HCControl_linkEnable | 2435 OHCI1394_HCControl_BIBimageValid); 2436 2437 reg_write(ohci, OHCI1394_LinkControlSet, 2438 OHCI1394_LinkControl_rcvSelfID | 2439 OHCI1394_LinkControl_rcvPhyPkt); 2440 2441 ar_context_run(&ohci->ar_request_ctx); 2442 ar_context_run(&ohci->ar_response_ctx); 2443 2444 flush_writes(ohci); 2445 2446 /* We are ready to go, reset bus to finish initialization. */ 2447 fw_schedule_bus_reset(&ohci->card, false, true); 2448 2449 return 0; 2450 } 2451 2452 static int ohci_set_config_rom(struct fw_card *card, 2453 const __be32 *config_rom, size_t length) 2454 { 2455 struct fw_ohci *ohci; 2456 __be32 *next_config_rom; 2457 dma_addr_t uninitialized_var(next_config_rom_bus); 2458 2459 ohci = fw_ohci(card); 2460 2461 /* 2462 * When the OHCI controller is enabled, the config rom update 2463 * mechanism is a bit tricky, but easy enough to use. See 2464 * section 5.5.6 in the OHCI specification. 2465 * 2466 * The OHCI controller caches the new config rom address in a 2467 * shadow register (ConfigROMmapNext) and needs a bus reset 2468 * for the changes to take place. When the bus reset is 2469 * detected, the controller loads the new values for the 2470 * ConfigRomHeader and BusOptions registers from the specified 2471 * config rom and loads ConfigROMmap from the ConfigROMmapNext 2472 * shadow register. All automatically and atomically. 2473 * 2474 * Now, there's a twist to this story. 
The automatic load of 2475 * ConfigRomHeader and BusOptions doesn't honor the 2476 * noByteSwapData bit, so with a be32 config rom, the 2477 * controller will load be32 values into these registers 2478 * during the atomic update, even on little endian 2479 * architectures. The workaround we use is to put a 0 in the 2480 * header quadlet; 0 is endian-agnostic and means that the 2481 * config rom isn't ready yet. In the bus reset work item we 2482 * then set up the real values for the two registers. 2483 * 2484 * We use ohci->lock to avoid racing with the code that sets 2485 * ohci->next_config_rom to NULL (see bus_reset_work). 2486 */ 2487 2488 next_config_rom = 2489 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2490 &next_config_rom_bus, GFP_KERNEL); 2491 if (next_config_rom == NULL) 2492 return -ENOMEM; 2493 2494 spin_lock_irq(&ohci->lock); 2495 2496 /* 2497 * If there is no config_rom update already pending, 2498 * push our new allocation into the ohci->next_config_rom 2499 * and then mark the local variable as null so that we 2500 * won't deallocate the new buffer. 2501 * 2502 * OTOH, if there is a pending config_rom update, just 2503 * use that buffer with the new config_rom data, and 2504 * let this routine free the unused DMA allocation. 2505 */ 2506 2507 if (ohci->next_config_rom == NULL) { 2508 ohci->next_config_rom = next_config_rom; 2509 ohci->next_config_rom_bus = next_config_rom_bus; 2510 next_config_rom = NULL; 2511 } 2512 2513 copy_config_rom(ohci->next_config_rom, config_rom, length); 2514 2515 ohci->next_header = config_rom[0]; 2516 ohci->next_config_rom[0] = 0; 2517 2518 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); 2519 2520 spin_unlock_irq(&ohci->lock); 2521 2522 /* If we didn't use the DMA allocation, delete it. */ 2523 if (next_config_rom != NULL) 2524 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2525 next_config_rom, next_config_rom_bus); 2526 2527 /* 2528 * Now initiate a bus reset to have the changes take 2529 * effect. We clean up the old config rom memory and DMA 2530 * mappings in the bus reset work item, since the OHCI 2531 * controller could need to access it before the bus reset 2532 * takes effect.
2533 */ 2534 2535 fw_schedule_bus_reset(&ohci->card, true, true); 2536 2537 return 0; 2538 } 2539 2540 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) 2541 { 2542 struct fw_ohci *ohci = fw_ohci(card); 2543 2544 at_context_transmit(&ohci->at_request_ctx, packet); 2545 } 2546 2547 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) 2548 { 2549 struct fw_ohci *ohci = fw_ohci(card); 2550 2551 at_context_transmit(&ohci->at_response_ctx, packet); 2552 } 2553 2554 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) 2555 { 2556 struct fw_ohci *ohci = fw_ohci(card); 2557 struct context *ctx = &ohci->at_request_ctx; 2558 struct driver_data *driver_data = packet->driver_data; 2559 int ret = -ENOENT; 2560 2561 tasklet_disable(&ctx->tasklet); 2562 2563 if (packet->ack != 0) 2564 goto out; 2565 2566 if (packet->payload_mapped) 2567 dma_unmap_single(ohci->card.device, packet->payload_bus, 2568 packet->payload_length, DMA_TO_DEVICE); 2569 2570 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); 2571 driver_data->packet = NULL; 2572 packet->ack = RCODE_CANCELLED; 2573 packet->callback(packet, &ohci->card, packet->ack); 2574 ret = 0; 2575 out: 2576 tasklet_enable(&ctx->tasklet); 2577 2578 return ret; 2579 } 2580 2581 static int ohci_enable_phys_dma(struct fw_card *card, 2582 int node_id, int generation) 2583 { 2584 struct fw_ohci *ohci = fw_ohci(card); 2585 unsigned long flags; 2586 int n, ret = 0; 2587 2588 if (param_remote_dma) 2589 return 0; 2590 2591 /* 2592 * FIXME: Make sure this bitmask is cleared when we clear the busReset 2593 * interrupt bit. Clear physReqResourceAllBuses on bus reset. 2594 */ 2595 2596 spin_lock_irqsave(&ohci->lock, flags); 2597 2598 if (ohci->generation != generation) { 2599 ret = -ESTALE; 2600 goto out; 2601 } 2602 2603 /* 2604 * Note, if the node ID contains a non-local bus ID, physical DMA is 2605 * enabled for _all_ nodes on remote buses. 2606 */ 2607 2608 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63; 2609 if (n < 32) 2610 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); 2611 else 2612 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); 2613 2614 flush_writes(ohci); 2615 out: 2616 spin_unlock_irqrestore(&ohci->lock, flags); 2617 2618 return ret; 2619 } 2620 2621 static u32 ohci_read_csr(struct fw_card *card, int csr_offset) 2622 { 2623 struct fw_ohci *ohci = fw_ohci(card); 2624 unsigned long flags; 2625 u32 value; 2626 2627 switch (csr_offset) { 2628 case CSR_STATE_CLEAR: 2629 case CSR_STATE_SET: 2630 if (ohci->is_root && 2631 (reg_read(ohci, OHCI1394_LinkControlSet) & 2632 OHCI1394_LinkControl_cycleMaster)) 2633 value = CSR_STATE_BIT_CMSTR; 2634 else 2635 value = 0; 2636 if (ohci->csr_state_setclear_abdicate) 2637 value |= CSR_STATE_BIT_ABDICATE; 2638 2639 return value; 2640 2641 case CSR_NODE_IDS: 2642 return reg_read(ohci, OHCI1394_NodeID) << 16; 2643 2644 case CSR_CYCLE_TIME: 2645 return get_cycle_time(ohci); 2646 2647 case CSR_BUS_TIME: 2648 /* 2649 * We might be called just after the cycle timer has wrapped 2650 * around but just before the cycle64Seconds handler, so we 2651 * better check here, too, if the bus time needs to be updated. 
2652 */ 2653 spin_lock_irqsave(&ohci->lock, flags); 2654 value = update_bus_time(ohci); 2655 spin_unlock_irqrestore(&ohci->lock, flags); 2656 return value; 2657 2658 case CSR_BUSY_TIMEOUT: 2659 value = reg_read(ohci, OHCI1394_ATRetries); 2660 return (value >> 4) & 0x0ffff00f; 2661 2662 case CSR_PRIORITY_BUDGET: 2663 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | 2664 (ohci->pri_req_max << 8); 2665 2666 default: 2667 WARN_ON(1); 2668 return 0; 2669 } 2670 } 2671 2672 static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) 2673 { 2674 struct fw_ohci *ohci = fw_ohci(card); 2675 unsigned long flags; 2676 2677 switch (csr_offset) { 2678 case CSR_STATE_CLEAR: 2679 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 2680 reg_write(ohci, OHCI1394_LinkControlClear, 2681 OHCI1394_LinkControl_cycleMaster); 2682 flush_writes(ohci); 2683 } 2684 if (value & CSR_STATE_BIT_ABDICATE) 2685 ohci->csr_state_setclear_abdicate = false; 2686 break; 2687 2688 case CSR_STATE_SET: 2689 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 2690 reg_write(ohci, OHCI1394_LinkControlSet, 2691 OHCI1394_LinkControl_cycleMaster); 2692 flush_writes(ohci); 2693 } 2694 if (value & CSR_STATE_BIT_ABDICATE) 2695 ohci->csr_state_setclear_abdicate = true; 2696 break; 2697 2698 case CSR_NODE_IDS: 2699 reg_write(ohci, OHCI1394_NodeID, value >> 16); 2700 flush_writes(ohci); 2701 break; 2702 2703 case CSR_CYCLE_TIME: 2704 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); 2705 reg_write(ohci, OHCI1394_IntEventSet, 2706 OHCI1394_cycleInconsistent); 2707 flush_writes(ohci); 2708 break; 2709 2710 case CSR_BUS_TIME: 2711 spin_lock_irqsave(&ohci->lock, flags); 2712 ohci->bus_time = (update_bus_time(ohci) & 0x40) | 2713 (value & ~0x7f); 2714 spin_unlock_irqrestore(&ohci->lock, flags); 2715 break; 2716 2717 case CSR_BUSY_TIMEOUT: 2718 value = (value & 0xf) | ((value & 0xf) << 4) | 2719 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); 2720 reg_write(ohci, OHCI1394_ATRetries, value); 2721 flush_writes(ohci); 2722 break; 2723 2724 case CSR_PRIORITY_BUDGET: 2725 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); 2726 flush_writes(ohci); 2727 break; 2728 2729 default: 2730 WARN_ON(1); 2731 break; 2732 } 2733 } 2734 2735 static void flush_iso_completions(struct iso_context *ctx) 2736 { 2737 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, 2738 ctx->header_length, ctx->header, 2739 ctx->base.callback_data); 2740 ctx->header_length = 0; 2741 } 2742 2743 static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr) 2744 { 2745 u32 *ctx_hdr; 2746 2747 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { 2748 if (ctx->base.drop_overflow_headers) 2749 return; 2750 flush_iso_completions(ctx); 2751 } 2752 2753 ctx_hdr = ctx->header + ctx->header_length; 2754 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); 2755 2756 /* 2757 * The two iso header quadlets are byteswapped to little 2758 * endian by the controller, but we want to present them 2759 * as big endian for consistency with the bus endianness. 
2760 */ 2761 if (ctx->base.header_size > 0) 2762 ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */ 2763 if (ctx->base.header_size > 4) 2764 ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */ 2765 if (ctx->base.header_size > 8) 2766 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); 2767 ctx->header_length += ctx->base.header_size; 2768 } 2769 2770 static int handle_ir_packet_per_buffer(struct context *context, 2771 struct descriptor *d, 2772 struct descriptor *last) 2773 { 2774 struct iso_context *ctx = 2775 container_of(context, struct iso_context, context); 2776 struct descriptor *pd; 2777 u32 buffer_dma; 2778 2779 for (pd = d; pd <= last; pd++) 2780 if (pd->transfer_status) 2781 break; 2782 if (pd > last) 2783 /* Descriptor(s) not done yet, stop iteration */ 2784 return 0; 2785 2786 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) { 2787 d++; 2788 buffer_dma = le32_to_cpu(d->data_address); 2789 dma_sync_single_range_for_cpu(context->ohci->card.device, 2790 buffer_dma & PAGE_MASK, 2791 buffer_dma & ~PAGE_MASK, 2792 le16_to_cpu(d->req_count), 2793 DMA_FROM_DEVICE); 2794 } 2795 2796 copy_iso_headers(ctx, (u32 *) (last + 1)); 2797 2798 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) 2799 flush_iso_completions(ctx); 2800 2801 return 1; 2802 } 2803 2804 /* d == last because each descriptor block is only a single descriptor. */ 2805 static int handle_ir_buffer_fill(struct context *context, 2806 struct descriptor *d, 2807 struct descriptor *last) 2808 { 2809 struct iso_context *ctx = 2810 container_of(context, struct iso_context, context); 2811 unsigned int req_count, res_count, completed; 2812 u32 buffer_dma; 2813 2814 req_count = le16_to_cpu(last->req_count); 2815 res_count = le16_to_cpu(ACCESS_ONCE(last->res_count)); 2816 completed = req_count - res_count; 2817 buffer_dma = le32_to_cpu(last->data_address); 2818 2819 if (completed > 0) { 2820 ctx->mc_buffer_bus = buffer_dma; 2821 ctx->mc_completed = completed; 2822 } 2823 2824 if (res_count != 0) 2825 /* Descriptor(s) not done yet, stop iteration */ 2826 return 0; 2827 2828 dma_sync_single_range_for_cpu(context->ohci->card.device, 2829 buffer_dma & PAGE_MASK, 2830 buffer_dma & ~PAGE_MASK, 2831 completed, DMA_FROM_DEVICE); 2832 2833 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { 2834 ctx->base.callback.mc(&ctx->base, 2835 buffer_dma + completed, 2836 ctx->base.callback_data); 2837 ctx->mc_completed = 0; 2838 } 2839 2840 return 1; 2841 } 2842 2843 static void flush_ir_buffer_fill(struct iso_context *ctx) 2844 { 2845 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, 2846 ctx->mc_buffer_bus & PAGE_MASK, 2847 ctx->mc_buffer_bus & ~PAGE_MASK, 2848 ctx->mc_completed, DMA_FROM_DEVICE); 2849 2850 ctx->base.callback.mc(&ctx->base, 2851 ctx->mc_buffer_bus + ctx->mc_completed, 2852 ctx->base.callback_data); 2853 ctx->mc_completed = 0; 2854 } 2855 2856 static inline void sync_it_packet_for_cpu(struct context *context, 2857 struct descriptor *pd) 2858 { 2859 __le16 control; 2860 u32 buffer_dma; 2861 2862 /* only packets beginning with OUTPUT_MORE* have data buffers */ 2863 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) 2864 return; 2865 2866 /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */ 2867 pd += 2; 2868 2869 /* 2870 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's 2871 * data buffer is in the context program's coherent page and must not 2872 * be synced. 
2873 */ 2874 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) == 2875 (context->current_bus & PAGE_MASK)) { 2876 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) 2877 return; 2878 pd++; 2879 } 2880 2881 do { 2882 buffer_dma = le32_to_cpu(pd->data_address); 2883 dma_sync_single_range_for_cpu(context->ohci->card.device, 2884 buffer_dma & PAGE_MASK, 2885 buffer_dma & ~PAGE_MASK, 2886 le16_to_cpu(pd->req_count), 2887 DMA_TO_DEVICE); 2888 control = pd->control; 2889 pd++; 2890 } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))); 2891 } 2892 2893 static int handle_it_packet(struct context *context, 2894 struct descriptor *d, 2895 struct descriptor *last) 2896 { 2897 struct iso_context *ctx = 2898 container_of(context, struct iso_context, context); 2899 struct descriptor *pd; 2900 __be32 *ctx_hdr; 2901 2902 for (pd = d; pd <= last; pd++) 2903 if (pd->transfer_status) 2904 break; 2905 if (pd > last) 2906 /* Descriptor(s) not done yet, stop iteration */ 2907 return 0; 2908 2909 sync_it_packet_for_cpu(context, d); 2910 2911 if (ctx->header_length + 4 > PAGE_SIZE) { 2912 if (ctx->base.drop_overflow_headers) 2913 return 1; 2914 flush_iso_completions(ctx); 2915 } 2916 2917 ctx_hdr = ctx->header + ctx->header_length; 2918 ctx->last_timestamp = le16_to_cpu(last->res_count); 2919 /* Present this value as big-endian to match the receive code */ 2920 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | 2921 le16_to_cpu(pd->res_count)); 2922 ctx->header_length += 4; 2923 2924 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) 2925 flush_iso_completions(ctx); 2926 2927 return 1; 2928 } 2929 2930 static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) 2931 { 2932 u32 hi = channels >> 32, lo = channels; 2933 2934 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); 2935 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); 2936 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); 2937 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); 2938 mmiowb(); 2939 ohci->mc_channels = channels; 2940 } 2941 2942 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, 2943 int type, int channel, size_t header_size) 2944 { 2945 struct fw_ohci *ohci = fw_ohci(card); 2946 struct iso_context *uninitialized_var(ctx); 2947 descriptor_callback_t uninitialized_var(callback); 2948 u64 *uninitialized_var(channels); 2949 u32 *uninitialized_var(mask), uninitialized_var(regs); 2950 int index, ret = -EBUSY; 2951 2952 spin_lock_irq(&ohci->lock); 2953 2954 switch (type) { 2955 case FW_ISO_CONTEXT_TRANSMIT: 2956 mask = &ohci->it_context_mask; 2957 callback = handle_it_packet; 2958 index = ffs(*mask) - 1; 2959 if (index >= 0) { 2960 *mask &= ~(1 << index); 2961 regs = OHCI1394_IsoXmitContextBase(index); 2962 ctx = &ohci->it_context_list[index]; 2963 } 2964 break; 2965 2966 case FW_ISO_CONTEXT_RECEIVE: 2967 channels = &ohci->ir_context_channels; 2968 mask = &ohci->ir_context_mask; 2969 callback = handle_ir_packet_per_buffer; 2970 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; 2971 if (index >= 0) { 2972 *channels &= ~(1ULL << channel); 2973 *mask &= ~(1 << index); 2974 regs = OHCI1394_IsoRcvContextBase(index); 2975 ctx = &ohci->ir_context_list[index]; 2976 } 2977 break; 2978 2979 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2980 mask = &ohci->ir_context_mask; 2981 callback = handle_ir_buffer_fill; 2982 index = !ohci->mc_allocated ? 
ffs(*mask) - 1 : -1; 2983 if (index >= 0) { 2984 ohci->mc_allocated = true; 2985 *mask &= ~(1 << index); 2986 regs = OHCI1394_IsoRcvContextBase(index); 2987 ctx = &ohci->ir_context_list[index]; 2988 } 2989 break; 2990 2991 default: 2992 index = -1; 2993 ret = -ENOSYS; 2994 } 2995 2996 spin_unlock_irq(&ohci->lock); 2997 2998 if (index < 0) 2999 return ERR_PTR(ret); 3000 3001 memset(ctx, 0, sizeof(*ctx)); 3002 ctx->header_length = 0; 3003 ctx->header = (void *) __get_free_page(GFP_KERNEL); 3004 if (ctx->header == NULL) { 3005 ret = -ENOMEM; 3006 goto out; 3007 } 3008 ret = context_init(&ctx->context, ohci, regs, callback); 3009 if (ret < 0) 3010 goto out_with_header; 3011 3012 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) { 3013 set_multichannel_mask(ohci, 0); 3014 ctx->mc_completed = 0; 3015 } 3016 3017 return &ctx->base; 3018 3019 out_with_header: 3020 free_page((unsigned long)ctx->header); 3021 out: 3022 spin_lock_irq(&ohci->lock); 3023 3024 switch (type) { 3025 case FW_ISO_CONTEXT_RECEIVE: 3026 *channels |= 1ULL << channel; 3027 break; 3028 3029 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3030 ohci->mc_allocated = false; 3031 break; 3032 } 3033 *mask |= 1 << index; 3034 3035 spin_unlock_irq(&ohci->lock); 3036 3037 return ERR_PTR(ret); 3038 } 3039 3040 static int ohci_start_iso(struct fw_iso_context *base, 3041 s32 cycle, u32 sync, u32 tags) 3042 { 3043 struct iso_context *ctx = container_of(base, struct iso_context, base); 3044 struct fw_ohci *ohci = ctx->context.ohci; 3045 u32 control = IR_CONTEXT_ISOCH_HEADER, match; 3046 int index; 3047 3048 /* the controller cannot start without any queued packets */ 3049 if (ctx->context.last->branch_address == 0) 3050 return -ENODATA; 3051 3052 switch (ctx->base.type) { 3053 case FW_ISO_CONTEXT_TRANSMIT: 3054 index = ctx - ohci->it_context_list; 3055 match = 0; 3056 if (cycle >= 0) 3057 match = IT_CONTEXT_CYCLE_MATCH_ENABLE | 3058 (cycle & 0x7fff) << 16; 3059 3060 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); 3061 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); 3062 context_run(&ctx->context, match); 3063 break; 3064 3065 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3066 control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; 3067 /* fall through */ 3068 case FW_ISO_CONTEXT_RECEIVE: 3069 index = ctx - ohci->ir_context_list; 3070 match = (tags << 28) | (sync << 8) | ctx->base.channel; 3071 if (cycle >= 0) { 3072 match |= (cycle & 0x07fff) << 12; 3073 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE; 3074 } 3075 3076 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index); 3077 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); 3078 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); 3079 context_run(&ctx->context, control); 3080 3081 ctx->sync = sync; 3082 ctx->tags = tags; 3083 3084 break; 3085 } 3086 3087 return 0; 3088 } 3089 3090 static int ohci_stop_iso(struct fw_iso_context *base) 3091 { 3092 struct fw_ohci *ohci = fw_ohci(base->card); 3093 struct iso_context *ctx = container_of(base, struct iso_context, base); 3094 int index; 3095 3096 switch (ctx->base.type) { 3097 case FW_ISO_CONTEXT_TRANSMIT: 3098 index = ctx - ohci->it_context_list; 3099 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); 3100 break; 3101 3102 case FW_ISO_CONTEXT_RECEIVE: 3103 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3104 index = ctx - ohci->ir_context_list; 3105 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); 3106 break; 3107 } 3108 flush_writes(ohci); 3109 context_stop(&ctx->context); 3110 
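	/*
	 * Wait for a possibly still-running context tasklet to finish, so
	 * that no completion callback is in flight once we return.
	 */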
tasklet_kill(&ctx->context.tasklet); 3111 3112 return 0; 3113 } 3114 3115 static void ohci_free_iso_context(struct fw_iso_context *base) 3116 { 3117 struct fw_ohci *ohci = fw_ohci(base->card); 3118 struct iso_context *ctx = container_of(base, struct iso_context, base); 3119 unsigned long flags; 3120 int index; 3121 3122 ohci_stop_iso(base); 3123 context_release(&ctx->context); 3124 free_page((unsigned long)ctx->header); 3125 3126 spin_lock_irqsave(&ohci->lock, flags); 3127 3128 switch (base->type) { 3129 case FW_ISO_CONTEXT_TRANSMIT: 3130 index = ctx - ohci->it_context_list; 3131 ohci->it_context_mask |= 1 << index; 3132 break; 3133 3134 case FW_ISO_CONTEXT_RECEIVE: 3135 index = ctx - ohci->ir_context_list; 3136 ohci->ir_context_mask |= 1 << index; 3137 ohci->ir_context_channels |= 1ULL << base->channel; 3138 break; 3139 3140 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3141 index = ctx - ohci->ir_context_list; 3142 ohci->ir_context_mask |= 1 << index; 3143 ohci->ir_context_channels |= ohci->mc_channels; 3144 ohci->mc_channels = 0; 3145 ohci->mc_allocated = false; 3146 break; 3147 } 3148 3149 spin_unlock_irqrestore(&ohci->lock, flags); 3150 } 3151 3152 static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) 3153 { 3154 struct fw_ohci *ohci = fw_ohci(base->card); 3155 unsigned long flags; 3156 int ret; 3157 3158 switch (base->type) { 3159 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3160 3161 spin_lock_irqsave(&ohci->lock, flags); 3162 3163 /* Don't allow multichannel to grab other contexts' channels. */ 3164 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { 3165 *channels = ohci->ir_context_channels; 3166 ret = -EBUSY; 3167 } else { 3168 set_multichannel_mask(ohci, *channels); 3169 ret = 0; 3170 } 3171 3172 spin_unlock_irqrestore(&ohci->lock, flags); 3173 3174 break; 3175 default: 3176 ret = -EINVAL; 3177 } 3178 3179 return ret; 3180 } 3181 3182 #ifdef CONFIG_PM 3183 static void ohci_resume_iso_dma(struct fw_ohci *ohci) 3184 { 3185 int i; 3186 struct iso_context *ctx; 3187 3188 for (i = 0 ; i < ohci->n_ir ; i++) { 3189 ctx = &ohci->ir_context_list[i]; 3190 if (ctx->context.running) 3191 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); 3192 } 3193 3194 for (i = 0 ; i < ohci->n_it ; i++) { 3195 ctx = &ohci->it_context_list[i]; 3196 if (ctx->context.running) 3197 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); 3198 } 3199 } 3200 #endif 3201 3202 static int queue_iso_transmit(struct iso_context *ctx, 3203 struct fw_iso_packet *packet, 3204 struct fw_iso_buffer *buffer, 3205 unsigned long payload) 3206 { 3207 struct descriptor *d, *last, *pd; 3208 struct fw_iso_packet *p; 3209 __le32 *header; 3210 dma_addr_t d_bus, page_bus; 3211 u32 z, header_z, payload_z, irq; 3212 u32 payload_index, payload_end_index, next_page_index; 3213 int page, end_page, i, length, offset; 3214 3215 p = packet; 3216 payload_index = payload; 3217 3218 if (p->skip) 3219 z = 1; 3220 else 3221 z = 2; 3222 if (p->header_length > 0) 3223 z++; 3224 3225 /* Determine the first page the payload isn't contained in. */ 3226 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; 3227 if (p->payload_length > 0) 3228 payload_z = end_page - (payload_index >> PAGE_SHIFT); 3229 else 3230 payload_z = 0; 3231 3232 z += payload_z; 3233 3234 /* Get header size in number of descriptors. 
*/ 3235 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); 3236 3237 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); 3238 if (d == NULL) 3239 return -ENOMEM; 3240 3241 if (!p->skip) { 3242 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 3243 d[0].req_count = cpu_to_le16(8); 3244 /* 3245 * Link the skip address to this descriptor itself. This causes 3246 * a context to skip a cycle whenever lost cycles or FIFO 3247 * overruns occur, without dropping the data. The application 3248 * should then decide whether this is an error condition or not. 3249 * FIXME: Make the context's cycle-lost behaviour configurable? 3250 */ 3251 d[0].branch_address = cpu_to_le32(d_bus | z); 3252 3253 header = (__le32 *) &d[1]; 3254 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | 3255 IT_HEADER_TAG(p->tag) | 3256 IT_HEADER_TCODE(TCODE_STREAM_DATA) | 3257 IT_HEADER_CHANNEL(ctx->base.channel) | 3258 IT_HEADER_SPEED(ctx->base.speed)); 3259 header[1] = 3260 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + 3261 p->payload_length)); 3262 } 3263 3264 if (p->header_length > 0) { 3265 d[2].req_count = cpu_to_le16(p->header_length); 3266 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d)); 3267 memcpy(&d[z], p->header, p->header_length); 3268 } 3269 3270 pd = d + z - payload_z; 3271 payload_end_index = payload_index + p->payload_length; 3272 for (i = 0; i < payload_z; i++) { 3273 page = payload_index >> PAGE_SHIFT; 3274 offset = payload_index & ~PAGE_MASK; 3275 next_page_index = (page + 1) << PAGE_SHIFT; 3276 length = 3277 min(next_page_index, payload_end_index) - payload_index; 3278 pd[i].req_count = cpu_to_le16(length); 3279 3280 page_bus = page_private(buffer->pages[page]); 3281 pd[i].data_address = cpu_to_le32(page_bus + offset); 3282 3283 dma_sync_single_range_for_device(ctx->context.ohci->card.device, 3284 page_bus, offset, length, 3285 DMA_TO_DEVICE); 3286 3287 payload_index += length; 3288 } 3289 3290 if (p->interrupt) 3291 irq = DESCRIPTOR_IRQ_ALWAYS; 3292 else 3293 irq = DESCRIPTOR_NO_IRQ; 3294 3295 last = z == 2 ? d : d + z - 1; 3296 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 3297 DESCRIPTOR_STATUS | 3298 DESCRIPTOR_BRANCH_ALWAYS | 3299 irq); 3300 3301 context_append(&ctx->context, d, z, header_z); 3302 3303 return 0; 3304 } 3305 3306 static int queue_iso_packet_per_buffer(struct iso_context *ctx, 3307 struct fw_iso_packet *packet, 3308 struct fw_iso_buffer *buffer, 3309 unsigned long payload) 3310 { 3311 struct device *device = ctx->context.ohci->card.device; 3312 struct descriptor *d, *pd; 3313 dma_addr_t d_bus, page_bus; 3314 u32 z, header_z, rest; 3315 int i, j, length; 3316 int page, offset, packet_count, header_size, payload_per_buffer; 3317 3318 /* 3319 * The OHCI controller puts the isochronous header and trailer in the 3320 * buffer, so we need at least 8 bytes. 3321 */ 3322 packet_count = packet->header_length / ctx->base.header_size; 3323 header_size = max(ctx->base.header_size, (size_t)8); 3324 3325 /* Get header size in number of descriptors. 
*/ 3326 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 3327 page = payload >> PAGE_SHIFT; 3328 offset = payload & ~PAGE_MASK; 3329 payload_per_buffer = packet->payload_length / packet_count; 3330 3331 for (i = 0; i < packet_count; i++) { 3332 /* d points to the header descriptor */ 3333 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1; 3334 d = context_get_descriptors(&ctx->context, 3335 z + header_z, &d_bus); 3336 if (d == NULL) 3337 return -ENOMEM; 3338 3339 d->control = cpu_to_le16(DESCRIPTOR_STATUS | 3340 DESCRIPTOR_INPUT_MORE); 3341 if (packet->skip && i == 0) 3342 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 3343 d->req_count = cpu_to_le16(header_size); 3344 d->res_count = d->req_count; 3345 d->transfer_status = 0; 3346 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); 3347 3348 rest = payload_per_buffer; 3349 pd = d; 3350 for (j = 1; j < z; j++) { 3351 pd++; 3352 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 3353 DESCRIPTOR_INPUT_MORE); 3354 3355 if (offset + rest < PAGE_SIZE) 3356 length = rest; 3357 else 3358 length = PAGE_SIZE - offset; 3359 pd->req_count = cpu_to_le16(length); 3360 pd->res_count = pd->req_count; 3361 pd->transfer_status = 0; 3362 3363 page_bus = page_private(buffer->pages[page]); 3364 pd->data_address = cpu_to_le32(page_bus + offset); 3365 3366 dma_sync_single_range_for_device(device, page_bus, 3367 offset, length, 3368 DMA_FROM_DEVICE); 3369 3370 offset = (offset + length) & ~PAGE_MASK; 3371 rest -= length; 3372 if (offset == 0) 3373 page++; 3374 } 3375 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 3376 DESCRIPTOR_INPUT_LAST | 3377 DESCRIPTOR_BRANCH_ALWAYS); 3378 if (packet->interrupt && i == packet_count - 1) 3379 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 3380 3381 context_append(&ctx->context, d, z, header_z); 3382 } 3383 3384 return 0; 3385 } 3386 3387 static int queue_iso_buffer_fill(struct iso_context *ctx, 3388 struct fw_iso_packet *packet, 3389 struct fw_iso_buffer *buffer, 3390 unsigned long payload) 3391 { 3392 struct descriptor *d; 3393 dma_addr_t d_bus, page_bus; 3394 int page, offset, rest, z, i, length; 3395 3396 page = payload >> PAGE_SHIFT; 3397 offset = payload & ~PAGE_MASK; 3398 rest = packet->payload_length; 3399 3400 /* We need one descriptor for each page in the buffer. 
*/ 3401 z = DIV_ROUND_UP(offset + rest, PAGE_SIZE); 3402 3403 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) 3404 return -EFAULT; 3405 3406 for (i = 0; i < z; i++) { 3407 d = context_get_descriptors(&ctx->context, 1, &d_bus); 3408 if (d == NULL) 3409 return -ENOMEM; 3410 3411 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 3412 DESCRIPTOR_BRANCH_ALWAYS); 3413 if (packet->skip && i == 0) 3414 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 3415 if (packet->interrupt && i == z - 1) 3416 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 3417 3418 if (offset + rest < PAGE_SIZE) 3419 length = rest; 3420 else 3421 length = PAGE_SIZE - offset; 3422 d->req_count = cpu_to_le16(length); 3423 d->res_count = d->req_count; 3424 d->transfer_status = 0; 3425 3426 page_bus = page_private(buffer->pages[page]); 3427 d->data_address = cpu_to_le32(page_bus + offset); 3428 3429 dma_sync_single_range_for_device(ctx->context.ohci->card.device, 3430 page_bus, offset, length, 3431 DMA_FROM_DEVICE); 3432 3433 rest -= length; 3434 offset = 0; 3435 page++; 3436 3437 context_append(&ctx->context, d, 1, 0); 3438 } 3439 3440 return 0; 3441 } 3442 3443 static int ohci_queue_iso(struct fw_iso_context *base, 3444 struct fw_iso_packet *packet, 3445 struct fw_iso_buffer *buffer, 3446 unsigned long payload) 3447 { 3448 struct iso_context *ctx = container_of(base, struct iso_context, base); 3449 unsigned long flags; 3450 int ret = -ENOSYS; 3451 3452 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 3453 switch (base->type) { 3454 case FW_ISO_CONTEXT_TRANSMIT: 3455 ret = queue_iso_transmit(ctx, packet, buffer, payload); 3456 break; 3457 case FW_ISO_CONTEXT_RECEIVE: 3458 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); 3459 break; 3460 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3461 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); 3462 break; 3463 } 3464 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); 3465 3466 return ret; 3467 } 3468 3469 static void ohci_flush_queue_iso(struct fw_iso_context *base) 3470 { 3471 struct context *ctx = 3472 &container_of(base, struct iso_context, base)->context; 3473 3474 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 3475 } 3476 3477 static int ohci_flush_iso_completions(struct fw_iso_context *base) 3478 { 3479 struct iso_context *ctx = container_of(base, struct iso_context, base); 3480 int ret = 0; 3481 3482 tasklet_disable(&ctx->context.tasklet); 3483 3484 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { 3485 context_tasklet((unsigned long)&ctx->context); 3486 3487 switch (base->type) { 3488 case FW_ISO_CONTEXT_TRANSMIT: 3489 case FW_ISO_CONTEXT_RECEIVE: 3490 if (ctx->header_length != 0) 3491 flush_iso_completions(ctx); 3492 break; 3493 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3494 if (ctx->mc_completed != 0) 3495 flush_ir_buffer_fill(ctx); 3496 break; 3497 default: 3498 ret = -ENOSYS; 3499 } 3500 3501 clear_bit_unlock(0, &ctx->flushing_completions); 3502 smp_mb__after_atomic(); 3503 } 3504 3505 tasklet_enable(&ctx->context.tasklet); 3506 3507 return ret; 3508 } 3509 3510 static const struct fw_card_driver ohci_driver = { 3511 .enable = ohci_enable, 3512 .read_phy_reg = ohci_read_phy_reg, 3513 .update_phy_reg = ohci_update_phy_reg, 3514 .set_config_rom = ohci_set_config_rom, 3515 .send_request = ohci_send_request, 3516 .send_response = ohci_send_response, 3517 .cancel_packet = ohci_cancel_packet, 3518 .enable_phys_dma = ohci_enable_phys_dma, 3519 .read_csr = ohci_read_csr, 3520 .write_csr = ohci_write_csr, 3521 
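	/* isochronous context operations */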
3522 .allocate_iso_context = ohci_allocate_iso_context, 3523 .free_iso_context = ohci_free_iso_context, 3524 .set_iso_channels = ohci_set_iso_channels, 3525 .queue_iso = ohci_queue_iso, 3526 .flush_queue_iso = ohci_flush_queue_iso, 3527 .flush_iso_completions = ohci_flush_iso_completions, 3528 .start_iso = ohci_start_iso, 3529 .stop_iso = ohci_stop_iso, 3530 }; 3531 3532 #ifdef CONFIG_PPC_PMAC 3533 static void pmac_ohci_on(struct pci_dev *dev) 3534 { 3535 if (machine_is(powermac)) { 3536 struct device_node *ofn = pci_device_to_OF_node(dev); 3537 3538 if (ofn) { 3539 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); 3540 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); 3541 } 3542 } 3543 } 3544 3545 static void pmac_ohci_off(struct pci_dev *dev) 3546 { 3547 if (machine_is(powermac)) { 3548 struct device_node *ofn = pci_device_to_OF_node(dev); 3549 3550 if (ofn) { 3551 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); 3552 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); 3553 } 3554 } 3555 } 3556 #else 3557 static inline void pmac_ohci_on(struct pci_dev *dev) {} 3558 static inline void pmac_ohci_off(struct pci_dev *dev) {} 3559 #endif /* CONFIG_PPC_PMAC */ 3560 3561 static int pci_probe(struct pci_dev *dev, 3562 const struct pci_device_id *ent) 3563 { 3564 struct fw_ohci *ohci; 3565 u32 bus_options, max_receive, link_speed, version; 3566 u64 guid; 3567 int i, err; 3568 size_t size; 3569 3570 if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { 3571 dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n"); 3572 return -ENOSYS; 3573 } 3574 3575 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); 3576 if (ohci == NULL) { 3577 err = -ENOMEM; 3578 goto fail; 3579 } 3580 3581 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); 3582 3583 pmac_ohci_on(dev); 3584 3585 err = pci_enable_device(dev); 3586 if (err) { 3587 dev_err(&dev->dev, "failed to enable OHCI hardware\n"); 3588 goto fail_free; 3589 } 3590 3591 pci_set_master(dev); 3592 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); 3593 pci_set_drvdata(dev, ohci); 3594 3595 spin_lock_init(&ohci->lock); 3596 mutex_init(&ohci->phy_reg_mutex); 3597 3598 INIT_WORK(&ohci->bus_reset_work, bus_reset_work); 3599 3600 if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) || 3601 pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) { 3602 ohci_err(ohci, "invalid MMIO resource\n"); 3603 err = -ENXIO; 3604 goto fail_disable; 3605 } 3606 3607 err = pci_request_region(dev, 0, ohci_driver_name); 3608 if (err) { 3609 ohci_err(ohci, "MMIO resource unavailable\n"); 3610 goto fail_disable; 3611 } 3612 3613 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); 3614 if (ohci->registers == NULL) { 3615 ohci_err(ohci, "failed to remap registers\n"); 3616 err = -ENXIO; 3617 goto fail_iomem; 3618 } 3619 3620 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) 3621 if ((ohci_quirks[i].vendor == dev->vendor) && 3622 (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || 3623 ohci_quirks[i].device == dev->device) && 3624 (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || 3625 ohci_quirks[i].revision >= dev->revision)) { 3626 ohci->quirks = ohci_quirks[i].flags; 3627 break; 3628 } 3629 if (param_quirks) 3630 ohci->quirks = param_quirks; 3631 3632 /* 3633 * Because dma_alloc_coherent() allocates at least one page, 3634 * we save space by using a common buffer for the AR request/ 3635 * response descriptors and the self IDs buffer. 
3636 */ 3637 BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4); 3638 BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2); 3639 ohci->misc_buffer = dma_alloc_coherent(ohci->card.device, 3640 PAGE_SIZE, 3641 &ohci->misc_buffer_bus, 3642 GFP_KERNEL); 3643 if (!ohci->misc_buffer) { 3644 err = -ENOMEM; 3645 goto fail_iounmap; 3646 } 3647 3648 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, 3649 OHCI1394_AsReqRcvContextControlSet); 3650 if (err < 0) 3651 goto fail_misc_buf; 3652 3653 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, 3654 OHCI1394_AsRspRcvContextControlSet); 3655 if (err < 0) 3656 goto fail_arreq_ctx; 3657 3658 err = context_init(&ohci->at_request_ctx, ohci, 3659 OHCI1394_AsReqTrContextControlSet, handle_at_packet); 3660 if (err < 0) 3661 goto fail_arrsp_ctx; 3662 3663 err = context_init(&ohci->at_response_ctx, ohci, 3664 OHCI1394_AsRspTrContextControlSet, handle_at_packet); 3665 if (err < 0) 3666 goto fail_atreq_ctx; 3667 3668 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); 3669 ohci->ir_context_channels = ~0ULL; 3670 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); 3671 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); 3672 ohci->ir_context_mask = ohci->ir_context_support; 3673 ohci->n_ir = hweight32(ohci->ir_context_mask); 3674 size = sizeof(struct iso_context) * ohci->n_ir; 3675 ohci->ir_context_list = kzalloc(size, GFP_KERNEL); 3676 3677 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); 3678 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); 3679 /* JMicron JMB38x often shows 0 at first read, just ignore it */ 3680 if (!ohci->it_context_support) { 3681 ohci_notice(ohci, "overriding IsoXmitIntMask\n"); 3682 ohci->it_context_support = 0xf; 3683 } 3684 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); 3685 ohci->it_context_mask = ohci->it_context_support; 3686 ohci->n_it = hweight32(ohci->it_context_mask); 3687 size = sizeof(struct iso_context) * ohci->n_it; 3688 ohci->it_context_list = kzalloc(size, GFP_KERNEL); 3689 3690 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { 3691 err = -ENOMEM; 3692 goto fail_contexts; 3693 } 3694 3695 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2; 3696 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; 3697 3698 bus_options = reg_read(ohci, OHCI1394_BusOptions); 3699 max_receive = (bus_options >> 12) & 0xf; 3700 link_speed = bus_options & 0x7; 3701 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | 3702 reg_read(ohci, OHCI1394_GUIDLo); 3703 3704 if (!(ohci->quirks & QUIRK_NO_MSI)) 3705 pci_enable_msi(dev); 3706 if (request_irq(dev->irq, irq_handler, 3707 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, 3708 ohci_driver_name, ohci)) { 3709 ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); 3710 err = -EIO; 3711 goto fail_msi; 3712 } 3713 3714 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); 3715 if (err) 3716 goto fail_irq; 3717 3718 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 3719 ohci_notice(ohci, 3720 "added OHCI v%x.%x device as card %d, " 3721 "%d IR + %d IT contexts, quirks 0x%x%s\n", 3722 version >> 16, version & 0xff, ohci->card.index, 3723 ohci->n_ir, ohci->n_it, ohci->quirks, 3724 reg_read(ohci, OHCI1394_PhyUpperBound) ? 
3725 ", physUB" : ""); 3726 3727 return 0; 3728 3729 fail_irq: 3730 free_irq(dev->irq, ohci); 3731 fail_msi: 3732 pci_disable_msi(dev); 3733 fail_contexts: 3734 kfree(ohci->ir_context_list); 3735 kfree(ohci->it_context_list); 3736 context_release(&ohci->at_response_ctx); 3737 fail_atreq_ctx: 3738 context_release(&ohci->at_request_ctx); 3739 fail_arrsp_ctx: 3740 ar_context_release(&ohci->ar_response_ctx); 3741 fail_arreq_ctx: 3742 ar_context_release(&ohci->ar_request_ctx); 3743 fail_misc_buf: 3744 dma_free_coherent(ohci->card.device, PAGE_SIZE, 3745 ohci->misc_buffer, ohci->misc_buffer_bus); 3746 fail_iounmap: 3747 pci_iounmap(dev, ohci->registers); 3748 fail_iomem: 3749 pci_release_region(dev, 0); 3750 fail_disable: 3751 pci_disable_device(dev); 3752 fail_free: 3753 kfree(ohci); 3754 pmac_ohci_off(dev); 3755 fail: 3756 return err; 3757 } 3758 3759 static void pci_remove(struct pci_dev *dev) 3760 { 3761 struct fw_ohci *ohci = pci_get_drvdata(dev); 3762 3763 /* 3764 * If the removal is happening from the suspend state, LPS won't be 3765 * enabled and host registers (eg., IntMaskClear) won't be accessible. 3766 */ 3767 if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) { 3768 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 3769 flush_writes(ohci); 3770 } 3771 cancel_work_sync(&ohci->bus_reset_work); 3772 fw_core_remove_card(&ohci->card); 3773 3774 /* 3775 * FIXME: Fail all pending packets here, now that the upper 3776 * layers can't queue any more. 3777 */ 3778 3779 software_reset(ohci); 3780 free_irq(dev->irq, ohci); 3781 3782 if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) 3783 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 3784 ohci->next_config_rom, ohci->next_config_rom_bus); 3785 if (ohci->config_rom) 3786 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 3787 ohci->config_rom, ohci->config_rom_bus); 3788 ar_context_release(&ohci->ar_request_ctx); 3789 ar_context_release(&ohci->ar_response_ctx); 3790 dma_free_coherent(ohci->card.device, PAGE_SIZE, 3791 ohci->misc_buffer, ohci->misc_buffer_bus); 3792 context_release(&ohci->at_request_ctx); 3793 context_release(&ohci->at_response_ctx); 3794 kfree(ohci->it_context_list); 3795 kfree(ohci->ir_context_list); 3796 pci_disable_msi(dev); 3797 pci_iounmap(dev, ohci->registers); 3798 pci_release_region(dev, 0); 3799 pci_disable_device(dev); 3800 kfree(ohci); 3801 pmac_ohci_off(dev); 3802 3803 dev_notice(&dev->dev, "removed fw-ohci device\n"); 3804 } 3805 3806 #ifdef CONFIG_PM 3807 static int pci_suspend(struct pci_dev *dev, pm_message_t state) 3808 { 3809 struct fw_ohci *ohci = pci_get_drvdata(dev); 3810 int err; 3811 3812 software_reset(ohci); 3813 err = pci_save_state(dev); 3814 if (err) { 3815 ohci_err(ohci, "pci_save_state failed\n"); 3816 return err; 3817 } 3818 err = pci_set_power_state(dev, pci_choose_state(dev, state)); 3819 if (err) 3820 ohci_err(ohci, "pci_set_power_state failed with %d\n", err); 3821 pmac_ohci_off(dev); 3822 3823 return 0; 3824 } 3825 3826 static int pci_resume(struct pci_dev *dev) 3827 { 3828 struct fw_ohci *ohci = pci_get_drvdata(dev); 3829 int err; 3830 3831 pmac_ohci_on(dev); 3832 pci_set_power_state(dev, PCI_D0); 3833 pci_restore_state(dev); 3834 err = pci_enable_device(dev); 3835 if (err) { 3836 ohci_err(ohci, "pci_enable_device failed\n"); 3837 return err; 3838 } 3839 3840 /* Some systems don't setup GUID register on resume from ram */ 3841 if (!reg_read(ohci, OHCI1394_GUIDLo) && 3842 !reg_read(ohci, OHCI1394_GUIDHi)) { 3843 reg_write(ohci, OHCI1394_GUIDLo, 
(u32)ohci->card.guid); 3844 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); 3845 } 3846 3847 err = ohci_enable(&ohci->card, NULL, 0); 3848 if (err) 3849 return err; 3850 3851 ohci_resume_iso_dma(ohci); 3852 3853 return 0; 3854 } 3855 #endif 3856 3857 static const struct pci_device_id pci_table[] = { 3858 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, 3859 { } 3860 }; 3861 3862 MODULE_DEVICE_TABLE(pci, pci_table); 3863 3864 static struct pci_driver fw_ohci_pci_driver = { 3865 .name = ohci_driver_name, 3866 .id_table = pci_table, 3867 .probe = pci_probe, 3868 .remove = pci_remove, 3869 #ifdef CONFIG_PM 3870 .resume = pci_resume, 3871 .suspend = pci_suspend, 3872 #endif 3873 }; 3874 3875 static int __init fw_ohci_init(void) 3876 { 3877 selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0); 3878 if (!selfid_workqueue) 3879 return -ENOMEM; 3880 3881 return pci_register_driver(&fw_ohci_pci_driver); 3882 } 3883 3884 static void __exit fw_ohci_cleanup(void) 3885 { 3886 pci_unregister_driver(&fw_ohci_pci_driver); 3887 destroy_workqueue(selfid_workqueue); 3888 } 3889 3890 module_init(fw_ohci_init); 3891 module_exit(fw_ohci_cleanup); 3892 3893 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 3894 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); 3895 MODULE_LICENSE("GPL"); 3896 3897 /* Provide a module alias so root-on-sbp2 initrds don't break. */ 3898 MODULE_ALIAS("ohci1394"); 3899