// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/crc-itu-t.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"
#include <trace/events/firewire.h>

#define define_fw_printk_level(func, kern_level)		\
void func(const struct fw_card *card, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
	printk(kern_level KBUILD_MODNAME " %s: %pV",		\
	       dev_name(card->device), &vaf);			\
	va_end(args);						\
}
define_fw_printk_level(fw_err, KERN_ERR);
define_fw_printk_level(fw_notice, KERN_NOTICE);

int fw_compute_block_crc(__be32 *block)
{
	int length;
	u16 crc;

	length = (be32_to_cpu(block[0]) >> 16) & 0xff;
	crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
	*block |= cpu_to_be32(crc);

	return length;
}

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static LIST_HEAD(descriptor_list);
static int descriptor_count;

static __be32 tmp_config_rom[256];
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
static size_t config_rom_length = 1 + 4 + 1 + 1;

#define BIB_CRC(v)		((v) <<  0)
#define BIB_CRC_LENGTH(v)	((v) << 16)
#define BIB_INFO_LENGTH(v)	((v) << 24)
#define BIB_BUS_NAME		0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v)	((v) <<  0)
#define BIB_GENERATION(v)	((v) <<  4)
#define BIB_MAX_ROM(v)		((v) <<  8)
#define BIB_MAX_RECEIVE(v)	((v) << 12)
#define BIB_CYC_CLK_ACC(v)	((v) << 16)
#define BIB_PMC			((1) << 27)
#define BIB_BMC			((1) << 28)
#define BIB_ISC			((1) << 29)
#define BIB_CMC			((1) << 30)
#define BIB_IRMC		((1) << 31)
#define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */

/*
 * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
 * but we have to make it longer because there are many devices whose firmware
 * is just too slow for that.
 */
#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)

#define CANON_OUI		0x000085

static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
	struct fw_descriptor *desc;
	int i, j, k, length;

	/*
	 * Initialize the contents of the config rom buffer.  On the OHCI
	 * controller, block reads to the config rom access host memory, but
	 * quadlet reads access the hardware bus info block registers.  That's
	 * just crack, but it means we should make sure the contents of the
	 * bus info block in host memory match the version stored in the OHCI
	 * registers.
	 */

	config_rom[0] = cpu_to_be32(
		BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
	config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
	config_rom[2] = cpu_to_be32(
		BIB_LINK_SPEED(card->link_speed) |
		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
		BIB_MAX_ROM(2) |
		BIB_MAX_RECEIVE(card->max_receive) |
		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
	config_rom[3] = cpu_to_be32(card->guid >> 32);
	config_rom[4] = cpu_to_be32(card->guid);

	/* Generate root directory. */
	config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
	i = 7;
	j = 7 + descriptor_count;

	/* Generate root directory entries for descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		if (desc->immediate > 0)
			config_rom[i++] = cpu_to_be32(desc->immediate);
		config_rom[i] = cpu_to_be32(desc->key | (j - i));
		i++;
		j += desc->length;
	}

	/* Update root directory length. */
	config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);

	/* End of root directory, now copy in descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		for (k = 0; k < desc->length; k++)
			config_rom[i + k] = cpu_to_be32(desc->data[k]);
		i += desc->length;
	}

	/*
	 * Calculate CRCs for all blocks in the config rom.  This assumes
	 * that CRC length and info length are identical for the bus info
	 * block, which is always the case for this implementation.
	 */
	for (i = 0; i < j; i += length + 1)
		length = fw_compute_block_crc(config_rom + i);

	WARN_ON(j != config_rom_length);
}

static void update_config_roms(void)
{
	struct fw_card *card;

	list_for_each_entry (card, &card_list, link) {
		generate_config_rom(card, tmp_config_rom);
		card->driver->set_config_rom(card, tmp_config_rom,
					     config_rom_length);
	}
}

static size_t required_space(struct fw_descriptor *desc)
{
	/* descriptor + entry into root dir + optional immediate entry */
	return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
}

int fw_core_add_descriptor(struct fw_descriptor *desc)
{
	size_t i;
	int ret;

	/*
	 * Check that the descriptor is valid; the lengths of all blocks in
	 * the descriptor have to add up to exactly the length of the
	 * descriptor.
	 */
	i = 0;
	while (i < desc->length)
		i += (desc->data[i] >> 16) + 1;

	if (i != desc->length)
		return -EINVAL;

	mutex_lock(&card_mutex);

	if (config_rom_length + required_space(desc) > 256) {
		ret = -EBUSY;
	} else {
		list_add_tail(&desc->link, &descriptor_list);
		config_rom_length += required_space(desc);
		descriptor_count++;
		if (desc->immediate > 0)
			descriptor_count++;
		update_config_roms();
		ret = 0;
	}

	mutex_unlock(&card_mutex);

	return ret;
}
EXPORT_SYMBOL(fw_core_add_descriptor);

void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
	mutex_lock(&card_mutex);

	list_del(&desc->link);
	config_rom_length -= required_space(desc);
	descriptor_count--;
	if (desc->immediate > 0)
		descriptor_count--;
	update_config_roms();

	mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);

static int reset_bus(struct fw_card *card, bool short_reset)
{
	int reg = short_reset ? 5 : 1;
	int bit = short_reset ?
		  PHY_BUS_SHORT_RESET : PHY_BUS_RESET;

	trace_bus_reset_initiate(card->generation, short_reset);

	return card->driver->update_phy_reg(card, reg, 0, bit);
}

void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
	trace_bus_reset_schedule(card->generation, short_reset);

	/* We don't try hard to sort out requests of long vs. short resets. */
	card->br_short = short_reset;

	/* Use an arbitrary short delay to combine multiple reset requests. */
	fw_card_get(card);
	if (!queue_delayed_work(fw_workqueue, &card->br_work,
				delayed ? DIV_ROUND_UP(HZ, 100) : 0))
		fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);

static void br_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, br_work.work);

	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
	if (card->reset_jiffies != 0 &&
	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
		trace_bus_reset_postpone(card->generation, card->br_short);

		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
			fw_card_put(card);
		return;
	}

	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
	reset_bus(card, card->br_short);
	fw_card_put(card);
}

static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
	int channel, bandwidth = 0;

	if (!card->broadcast_channel_allocated) {
		fw_iso_resource_manage(card, generation, 1ULL << 31,
				       &channel, &bandwidth, true);
		if (channel != 31) {
			fw_notice(card, "failed to allocate broadcast channel\n");
			return;
		}
		card->broadcast_channel_allocated = true;
	}

	device_for_each_child(card->device, (void *)(long)generation,
			      fw_device_set_broadcast_channel);
}

static const char gap_count_table[] = {
	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};

void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
	fw_card_get(card);
	if (!schedule_delayed_work(&card->bm_work, delay))
		fw_card_put(card);
}

static void bm_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
	struct fw_device *root_device, *irm_device;
	struct fw_node *root_node;
	int root_id, new_root_id, irm_id, bm_id, local_id;
	int gap_count, generation, grace, rcode;
	bool do_reset = false;
	bool root_device_is_running;
	bool root_device_is_cmc;
	bool irm_is_1394_1995_only;
	bool keep_this_irm;
	__be32 transaction_data[2];

	spin_lock_irq(&card->lock);

	if (card->local_node == NULL) {
		spin_unlock_irq(&card->lock);
		goto out_put_card;
	}

	generation = card->generation;

	root_node = card->root_node;
	fw_node_get(root_node);
	root_device = root_node->data;
	root_device_is_running = root_device &&
			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
	root_device_is_cmc = root_device && root_device->cmc;

	irm_device = card->irm_node->data;
	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
			(irm_device->config_rom[2] & 0x000000f0) == 0;

	/*
	 * Canon MV5i works unreliably if it is not root node.
	 */
	keep_this_irm = irm_device && irm_device->config_rom &&
			irm_device->config_rom[3] >> 8 == CANON_OUI;

	root_id  = root_node->node_id;
	irm_id   = card->irm_node->node_id;
	local_id = card->local_node->node_id;

	grace = time_after64(get_jiffies_64(),
			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

	if ((is_next_generation(generation, card->bm_generation) &&
	     !card->bm_abdicate) ||
	    (card->bm_generation != generation && grace)) {
		/*
		 * The first step is to figure out who is IRM and then try to
		 * become bus manager.  If the IRM is not well defined (e.g.
		 * does not have an active link layer or does not respond to
		 * our lock request), we will have to do a little vigilante
		 * bus management.  In that case, we do a goto into the gap
		 * count logic so that when we do the reset, we still optimize
		 * the gap count.  That could well save a reset in the next
		 * generation.
		 */

		if (!card->irm_node->link_on) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM has link off", new_root_id);
			goto pick_me;
		}

		if (irm_is_1394_1995_only && !keep_this_irm) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM is not 1394a compliant", new_root_id);
			goto pick_me;
		}

		transaction_data[0] = cpu_to_be32(0x3f);
		transaction_data[1] = cpu_to_be32(local_id);

		spin_unlock_irq(&card->lock);

		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				transaction_data, 8);

		if (rcode == RCODE_GENERATION)
			/* Another bus reset, BM work has been rescheduled. */
			goto out;

		bm_id = be32_to_cpu(transaction_data[0]);

		spin_lock_irq(&card->lock);
		if (rcode == RCODE_COMPLETE && generation == card->generation)
			card->bm_node_id =
				bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
		spin_unlock_irq(&card->lock);

		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
			/* Somebody else is BM.  Only act as IRM. */
			if (local_id == irm_id)
				allocate_broadcast_channel(card, generation);

			goto out;
		}

		if (rcode == RCODE_SEND_ERROR) {
			/*
			 * We have been unable to send the lock request due to
			 * some local problem.  Let's try again later and hope
			 * that the problem has gone away by then.
			 */
			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
			goto out;
		}

		spin_lock_irq(&card->lock);

		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
			/*
			 * The lock request failed, maybe the IRM
			 * isn't really IRM capable after all.  Let's
			 * do a bus reset and pick the local node as
			 * root, and thus, IRM.
			 */
			new_root_id = local_id;
			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
				  fw_rcode_string(rcode), new_root_id);
			goto pick_me;
		}
	} else if (card->bm_generation != generation) {
		/*
		 * We weren't BM in the last generation, and the last
		 * bus reset is less than 125ms ago.  Reschedule this job.
		 */
		spin_unlock_irq(&card->lock);
		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
		goto out;
	}

	/*
	 * We're bus manager for this generation, so next step is to
	 * make sure we have an active cycle master and do gap count
	 * optimization.
	 */
	card->bm_generation = generation;

	if (card->gap_count == 0) {
		/*
		 * If self IDs have inconsistent gap counts, do a
		 * bus reset ASAP.  The config rom read might never
		 * complete, so don't wait for it.  However, still
		 * send a PHY configuration packet prior to the
		 * bus reset.  The PHY configuration packet might
		 * fail, but 1394-2008 8.4.5.2 explicitly permits
		 * it in this case, so it should be safe to try.
		 */
		new_root_id = local_id;
		/*
		 * We must always send a bus reset if the gap count
		 * is inconsistent, so bypass the 5-reset limit.
		 */
		card->bm_retries = 0;
	} else if (root_device == NULL) {
		/*
		 * Either link_on is false, or we failed to read the
		 * config rom.  In either case, pick another root.
		 */
		new_root_id = local_id;
	} else if (!root_device_is_running) {
		/*
		 * If we haven't probed this device yet, bail out now
		 * and let's try again once that's done.
		 */
		spin_unlock_irq(&card->lock);
		goto out;
	} else if (root_device_is_cmc) {
		/*
		 * We will send out a force root packet for this
		 * node as part of the gap count optimization.
		 */
		new_root_id = root_id;
	} else {
		/*
		 * Current root has an active link layer and we
		 * successfully read the config rom, but it's not
		 * cycle master capable.
		 */
		new_root_id = local_id;
	}

 pick_me:
	/*
	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
	 * the typically much larger 1394b beta repeater delays though.
	 */
	if (!card->beta_repeaters_present &&
	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
		gap_count = gap_count_table[root_node->max_hops];
	else
		gap_count = 63;

	/*
	 * Finally, figure out if we should do a reset or not.  If we have
	 * done less than 5 resets with the same physical topology and we
	 * have either a new root or a new gap count setting, let's do it.
	 */

	if (card->bm_retries++ < 5 &&
	    (card->gap_count != gap_count || new_root_id != root_id))
		do_reset = true;

	spin_unlock_irq(&card->lock);

	if (do_reset) {
		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
			  new_root_id, gap_count);
		fw_send_phy_config(card, new_root_id, generation, gap_count);
		/*
		 * Where possible, use a short bus reset to minimize
		 * disruption to isochronous transfers.  But in the event
		 * of a gap count inconsistency, use a long bus reset.
		 *
		 * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
		 * may set different gap counts after a bus reset.  On a mixed
		 * 1394/1394a bus, a short bus reset can get doubled.  Some
		 * nodes may treat the double reset as one bus reset and others
		 * may treat it as two, causing a gap count inconsistency
		 * again.  Using a long bus reset prevents this.
		 */
		reset_bus(card, card->gap_count != 0);
		/* Will allocate broadcast channel after the reset. */
		goto out;
	}

	if (root_device_is_cmc) {
		/*
		 * Make sure that the cycle master sends cycle start packets.
		 */
		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
				root_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_STATE_SET,
				transaction_data, 4);
		if (rcode == RCODE_GENERATION)
			goto out;
	}

	if (local_id == irm_id)
		allocate_broadcast_channel(card, generation);

 out:
	fw_node_put(root_node);
 out_put_card:
	fw_card_put(card);
}

void fw_card_initialize(struct fw_card *card,
			const struct fw_card_driver *driver,
			struct device *device)
{
	static atomic_t index = ATOMIC_INIT(-1);

	card->index = atomic_inc_return(&index);
	card->driver = driver;
	card->device = device;
	card->current_tlabel = 0;
	card->tlabel_mask = 0;
	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
	card->split_timeout_jiffies =
			DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
	card->color = 0;
	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

	kref_init(&card->kref);
	init_completion(&card->done);
	INIT_LIST_HEAD(&card->transaction_list);
	INIT_LIST_HEAD(&card->phy_receiver_list);
	spin_lock_init(&card->lock);

	card->local_node = NULL;

	INIT_DELAYED_WORK(&card->br_work, br_work);
	INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);

int fw_card_add(struct fw_card *card,
		u32 max_receive, u32 link_speed, u64 guid)
{
	int ret;

	card->max_receive = max_receive;
	card->link_speed = link_speed;
	card->guid = guid;

	mutex_lock(&card_mutex);

	generate_config_rom(card, tmp_config_rom);
	ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
	if (ret == 0)
		list_add_tail(&card->link, &card_list);

	mutex_unlock(&card_mutex);

	return ret;
}
EXPORT_SYMBOL(fw_card_add);

/*
 * The next few functions implement a dummy driver that is used once a card
 * driver shuts down an fw_card.  This allows the driver to cleanly unload,
 * as all IO to the card will be handled (and failed) by the dummy driver
 * instead of calling into the module.  Only functions for iso context
 * shutdown still need to be provided by the card driver.
 *
 * .read/write_csr() should never be called anymore after the dummy driver
 * was bound since they are only used within request handler context.
 * .set_config_rom() is never called since the card is taken out of card_list
 * before switching to the dummy driver.
 */

static int dummy_read_phy_reg(struct fw_card *card, int address)
{
	return -ENODEV;
}

static int dummy_update_phy_reg(struct fw_card *card, int address,
				int clear_bits, int set_bits)
{
	return -ENODEV;
}

static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	return -ENOENT;
}

static int dummy_enable_phys_dma(struct fw_card *card,
				 int node_id, int generation)
{
	return -ENODEV;
}

static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	return ERR_PTR(-ENODEV);
}

static u32 dummy_read_csr(struct fw_card *card, int csr_offset)
{
	return 0;
}

static void dummy_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
}

static int dummy_start_iso(struct fw_iso_context *ctx,
			   s32 cycle, u32 sync, u32 tags)
{
	return -ENODEV;
}

static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
	return -ENODEV;
}

static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
			   struct fw_iso_buffer *buffer, unsigned long payload)
{
	return -ENODEV;
}

static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
{
}

static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
{
	return -ENODEV;
}

static const struct fw_card_driver dummy_driver_template = {
	.read_phy_reg		= dummy_read_phy_reg,
	.update_phy_reg		= dummy_update_phy_reg,
	.send_request		= dummy_send_request,
	.send_response		= dummy_send_response,
	.cancel_packet		= dummy_cancel_packet,
	.enable_phys_dma	= dummy_enable_phys_dma,
	.read_csr		= dummy_read_csr,
	.write_csr		= dummy_write_csr,
	.allocate_iso_context	= dummy_allocate_iso_context,
	.start_iso		= dummy_start_iso,
	.set_iso_channels	= dummy_set_iso_channels,
	.queue_iso		= dummy_queue_iso,
	.flush_queue_iso	= dummy_flush_queue_iso,
	.flush_iso_completions	= dummy_flush_iso_completions,
};

void fw_card_release(struct kref *kref)
{
	struct fw_card *card = container_of(kref, struct fw_card, kref);

	complete(&card->done);
}
EXPORT_SYMBOL_GPL(fw_card_release);

void fw_core_remove_card(struct fw_card *card)
{
	struct fw_card_driver dummy_driver = dummy_driver_template;
	unsigned long flags;

	card->driver->update_phy_reg(card, 4,
				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
	fw_schedule_bus_reset(card, false, true);

	mutex_lock(&card_mutex);
	list_del_init(&card->link);
	mutex_unlock(&card_mutex);

	/* Switch off most of the card driver interface. */
	dummy_driver.free_iso_context = card->driver->free_iso_context;
	dummy_driver.stop_iso = card->driver->stop_iso;
	card->driver = &dummy_driver;

	spin_lock_irqsave(&card->lock, flags);
	fw_destroy_nodes(card);
	spin_unlock_irqrestore(&card->lock, flags);

	/*
	 * Wait for all users, especially device workqueue jobs, to finish.
	 */
	fw_card_put(card);
	wait_for_completion(&card->done);

	WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);

/**
 * fw_card_read_cycle_time - read from Isochronous Cycle Timer Register of
 *			     1394 OHCI in MMIO region for the controller card.
 * @card: The instance of card for 1394 OHCI controller.
 * @cycle_time: The destination for the value of cycle time read from the register.
 *
 * Read the value of the Isochronous Cycle Timer Register of 1394 OHCI in the MMIO region for the
 * given controller card. This function accesses the region without any lock primitives or IRQ mask.
 * When returning successfully, the content of the @cycle_time argument holds the value aligned to
 * host endianness, formatted according to the CYCLE_TIME CSR register of the IEEE 1394 standard.
 *
 * Context: Any context.
 * Return:
 * * 0 - Read successfully.
 * * -ENODEV - The controller is unavailable due to being removed or unbound.
 */
int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time)
{
	if (card->driver->read_csr == dummy_read_csr)
		return -ENODEV;

	// The card driver may still be switched to the dummy driver between the check above and
	// the read below; the check is only a best effort to return -ENODEV.
	*cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
	return 0;
}
EXPORT_SYMBOL_GPL(fw_card_read_cycle_time);
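
/*
 * Illustrative usage sketch, not part of the driver: a caller that holds a
 * struct fw_card reference could sample and decode the cycle timer roughly as
 * below.  The caller-side variable names are hypothetical; the field layout
 * (seconds : cycle count : cycle offset) follows the CYCLE_TIME CSR format of
 * IEEE 1394.
 *
 *	u32 cycle_time;
 *
 *	if (fw_card_read_cycle_time(card, &cycle_time) < 0)
 *		return;		// card was removed or unbound
 *
 *	// CYCLE_TIME layout: bits 31-25 second count, bits 24-12 cycle count
 *	// (8000 Hz), bits 11-0 cycle offset (24.576 MHz ticks).
 *	pr_debug("sec=%u cyc=%u off=%u\n",
 *		 cycle_time >> 25, (cycle_time >> 12) & 0x1fff,
 *		 cycle_time & 0xfff);
 */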