// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/crc-itu-t.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"
#include <trace/events/firewire.h>

#define define_fw_printk_level(func, kern_level)		\
void func(const struct fw_card *card, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
	printk(kern_level KBUILD_MODNAME " %s: %pV",		\
	       dev_name(card->device), &vaf);			\
	va_end(args);						\
}
define_fw_printk_level(fw_err, KERN_ERR);
define_fw_printk_level(fw_notice, KERN_NOTICE);

int fw_compute_block_crc(__be32 *block)
{
	int length;
	u16 crc;

	length = (be32_to_cpu(block[0]) >> 16) & 0xff;
	crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
	*block |= cpu_to_be32(crc);

	return length;
}

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static LIST_HEAD(descriptor_list);
static int descriptor_count;

static __be32 tmp_config_rom[256];
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
static size_t config_rom_length = 1 + 4 + 1 + 1;

#define BIB_CRC(v)		((v) <<  0)
#define BIB_CRC_LENGTH(v)	((v) << 16)
#define BIB_INFO_LENGTH(v)	((v) << 24)
#define BIB_BUS_NAME		0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v)	((v) <<  0)
#define BIB_GENERATION(v)	((v) <<  4)
#define BIB_MAX_ROM(v)		((v) <<  8)
#define BIB_MAX_RECEIVE(v)	((v) << 12)
#define BIB_CYC_CLK_ACC(v)	((v) << 16)
#define BIB_PMC			((1) << 27)
#define BIB_BMC			((1) << 28)
#define BIB_ISC			((1) << 29)
#define BIB_CMC			((1) << 30)
#define BIB_IRMC		((1) << 31)
#define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */

/*
 * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
 * but we have to make it longer because there are many devices whose firmware
 * is just too slow for that.
 */
#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)
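/*
 * Worked example (informative only): DEFAULT_SPLIT_TIMEOUT is expressed in
 * 125 us isochronous cycles, so 2 * 8000 cycles corresponds to 2 seconds.
 * fw_card_initialize() below derives the CSR register fields from it as
 * split_timeout_hi = 16000 / 8000 = 2 (seconds) and
 * split_timeout_lo = (16000 % 8000) << 19 = 0 (cycles, held in bits 31..19).
 */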
#define CANON_OUI		0x000085

static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
	struct fw_descriptor *desc;
	int i, j, k, length;

	/*
	 * Initialize contents of config rom buffer.  On the OHCI
	 * controller, block reads of the config rom access host
	 * memory, but quadlet reads access the hardware bus info block
	 * registers.  That's just crack, but it means we should make
	 * sure the contents of the bus info block in host memory match
	 * the version stored in the OHCI registers.
	 */

	config_rom[0] = cpu_to_be32(
		BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
	config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
	config_rom[2] = cpu_to_be32(
		BIB_LINK_SPEED(card->link_speed) |
		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
		BIB_MAX_ROM(2) |
		BIB_MAX_RECEIVE(card->max_receive) |
		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
	config_rom[3] = cpu_to_be32(card->guid >> 32);
	config_rom[4] = cpu_to_be32(card->guid);

	/* Generate root directory. */
	config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
	i = 7;
	j = 7 + descriptor_count;

	/* Generate root directory entries for descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		if (desc->immediate > 0)
			config_rom[i++] = cpu_to_be32(desc->immediate);
		config_rom[i] = cpu_to_be32(desc->key | (j - i));
		i++;
		j += desc->length;
	}

	/* Update root directory length. */
	config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);

	/* End of root directory, now copy in descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		for (k = 0; k < desc->length; k++)
			config_rom[i + k] = cpu_to_be32(desc->data[k]);
		i += desc->length;
	}

	/* Calculate CRCs for all blocks in the config rom.  This
	 * assumes that CRC length and info length are identical for
	 * the bus info block, which is always the case for this
	 * implementation. */
	for (i = 0; i < j; i += length + 1)
		length = fw_compute_block_crc(config_rom + i);

	WARN_ON(j != config_rom_length);
}

static void update_config_roms(void)
{
	struct fw_card *card;

	list_for_each_entry (card, &card_list, link) {
		generate_config_rom(card, tmp_config_rom);
		card->driver->set_config_rom(card, tmp_config_rom,
					     config_rom_length);
	}
}

static size_t required_space(struct fw_descriptor *desc)
{
	/* descriptor + entry into root dir + optional immediate entry */
	return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
}

int fw_core_add_descriptor(struct fw_descriptor *desc)
{
	size_t i;

	/*
	 * Check that the descriptor is valid; the lengths of all blocks
	 * in the descriptor have to add up to exactly the length of the
	 * descriptor.
	 */
	i = 0;
	while (i < desc->length)
		i += (desc->data[i] >> 16) + 1;

	if (i != desc->length)
		return -EINVAL;

	guard(mutex)(&card_mutex);

	if (config_rom_length + required_space(desc) > 256)
		return -EBUSY;

	list_add_tail(&desc->link, &descriptor_list);
	config_rom_length += required_space(desc);
	descriptor_count++;
	if (desc->immediate > 0)
		descriptor_count++;
	update_config_roms();

	return 0;
}
EXPORT_SYMBOL(fw_core_add_descriptor);

void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
	guard(mutex)(&card_mutex);

	list_del(&desc->link);
	config_rom_length -= required_space(desc);
	descriptor_count--;
	if (desc->immediate > 0)
		descriptor_count--;
	update_config_roms();
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
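/*
 * Usage sketch (illustrative; identifiers prefixed with "example_" are
 * hypothetical): a driver publishes a root directory entry by registering a
 * struct fw_descriptor whose leaf data carries its own length in the upper
 * 16 bits of the first quadlet, as checked by fw_core_add_descriptor().
 * The CRC of each block is filled in later by generate_config_rom().
 *
 *	static const u32 example_leaf[] = {
 *		0x00060000,	// leaf header: six quadlets follow
 *		0x00000000,
 *		0x00000000,
 *		0x4c696e75,	// "Linu"
 *		0x78204669,	// "x Fi"
 *		0x72657769,	// "rewi"
 *		0x72650000,	// "re"
 *	};
 *
 *	static struct fw_descriptor example_descriptor = {
 *		.length		= ARRAY_SIZE(example_leaf),
 *		.immediate	= 0x03001f11,	// optional immediate root dir entry
 *		.key		= 0x81000000,	// key of the entry pointing at the leaf
 *		.data		= example_leaf,
 *	};
 *
 *	err = fw_core_add_descriptor(&example_descriptor);
 *	...
 *	fw_core_remove_descriptor(&example_descriptor);
 */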
static int reset_bus(struct fw_card *card, bool short_reset)
{
	int reg = short_reset ? 5 : 1;
	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;

	trace_bus_reset_initiate(card->index, card->generation, short_reset);

	return card->driver->update_phy_reg(card, reg, 0, bit);
}

void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
	trace_bus_reset_schedule(card->index, card->generation, short_reset);

	/* We don't try hard to sort out requests of long vs. short resets. */
	card->br_short = short_reset;

	/* Use an arbitrary short delay to combine multiple reset requests. */
	fw_card_get(card);
	if (!queue_delayed_work(fw_workqueue, &card->br_work,
				delayed ? DIV_ROUND_UP(HZ, 100) : 0))
		fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);

static void br_work(struct work_struct *work)
{
	struct fw_card *card = from_work(card, work, br_work.work);

	/* Delay for 2 s after the last reset per IEEE 1394 clause 8.2.1. */
	if (card->reset_jiffies != 0 &&
	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
		trace_bus_reset_postpone(card->index, card->generation, card->br_short);

		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
			fw_card_put(card);
		return;
	}

	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
	reset_bus(card, card->br_short);
	fw_card_put(card);
}

static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
	int channel, bandwidth = 0;

	if (!card->broadcast_channel_allocated) {
		fw_iso_resource_manage(card, generation, 1ULL << 31,
				       &channel, &bandwidth, true);
		if (channel != 31) {
			fw_notice(card, "failed to allocate broadcast channel\n");
			return;
		}
		card->broadcast_channel_allocated = true;
	}

	device_for_each_child(card->device, (void *)(long)generation,
			      fw_device_set_broadcast_channel);
}

void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
	fw_card_get(card);
	if (!schedule_delayed_work(&card->bm_work, delay))
		fw_card_put(card);
}

static void bm_work(struct work_struct *work)
{
	static const char gap_count_table[] = {
		63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
	};
	struct fw_card *card = from_work(card, work, bm_work.work);
	struct fw_device *root_device, *irm_device;
	struct fw_node *root_node;
	int root_id, new_root_id, irm_id, bm_id, local_id;
	int gap_count, generation, grace, rcode;
	bool do_reset = false;
	bool root_device_is_running;
	bool root_device_is_cmc;
	bool irm_is_1394_1995_only;
	bool keep_this_irm;
	__be32 transaction_data[2];

	spin_lock_irq(&card->lock);

	if (card->local_node == NULL) {
		spin_unlock_irq(&card->lock);
		goto out_put_card;
	}

	generation = card->generation;

	root_node = card->root_node;
	fw_node_get(root_node);
	root_device = root_node->data;
	root_device_is_running = root_device &&
			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
	root_device_is_cmc = root_device && root_device->cmc;

	irm_device = card->irm_node->data;
	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
			(irm_device->config_rom[2] & 0x000000f0) == 0;

	/* Canon MV5i works unreliably if it is not the root node. */
	keep_this_irm = irm_device && irm_device->config_rom &&
			irm_device->config_rom[3] >> 8 == CANON_OUI;

	root_id  = root_node->node_id;
	irm_id   = card->irm_node->node_id;
	local_id = card->local_node->node_id;

	grace = time_after64(get_jiffies_64(),
			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

	if ((is_next_generation(generation, card->bm_generation) &&
	     !card->bm_abdicate) ||
	    (card->bm_generation != generation && grace)) {
		/*
		 * The first step is to figure out who is IRM and
		 * then try to become bus manager.  If the IRM is not
		 * well defined (e.g. does not have an active link
		 * layer or does not respond to our lock request), we
		 * will have to do a little vigilante bus management.
		 * In that case, we do a goto into the gap count logic
		 * so that when we do the reset, we still optimize the
		 * gap count.  That could well save a reset in the
		 * next generation.
		 */

		if (!card->irm_node->link_on) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM has link off", new_root_id);
			goto pick_me;
		}

		if (irm_is_1394_1995_only && !keep_this_irm) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM is not 1394a compliant", new_root_id);
			goto pick_me;
		}

		transaction_data[0] = cpu_to_be32(0x3f);
		transaction_data[1] = cpu_to_be32(local_id);

		spin_unlock_irq(&card->lock);

		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				transaction_data, 8);

		if (rcode == RCODE_GENERATION)
			/* Another bus reset, BM work has been rescheduled. */
			goto out;

		bm_id = be32_to_cpu(transaction_data[0]);

		scoped_guard(spinlock_irq, &card->lock) {
			if (rcode == RCODE_COMPLETE && generation == card->generation)
				card->bm_node_id =
					bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
		}

		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
			/* Somebody else is BM.  Only act as IRM. */
			if (local_id == irm_id)
				allocate_broadcast_channel(card, generation);

			goto out;
		}

		if (rcode == RCODE_SEND_ERROR) {
			/*
			 * We have been unable to send the lock request due to
			 * some local problem.  Let's try again later and hope
			 * that the problem has gone away by then.
			 */
			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
			goto out;
		}

		spin_lock_irq(&card->lock);

		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
			/*
			 * The lock request failed, maybe the IRM
			 * isn't really IRM capable after all.  Let's
			 * do a bus reset and pick the local node as
			 * root, and thus, IRM.
			 */
			new_root_id = local_id;
			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
				  fw_rcode_string(rcode), new_root_id);
			goto pick_me;
		}
	} else if (card->bm_generation != generation) {
		/*
		 * We weren't BM in the last generation, and the last
		 * bus reset is less than 125 ms ago.  Reschedule this job.
		 */
		spin_unlock_irq(&card->lock);
		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
		goto out;
	}

	/*
	 * We're bus manager for this generation, so the next step is to
	 * make sure we have an active cycle master and do gap count
	 * optimization.
	 */
	card->bm_generation = generation;

	if (card->gap_count == 0) {
		/*
		 * If self IDs have inconsistent gap counts, do a
		 * bus reset ASAP.  The config rom read might never
		 * complete, so don't wait for it.  However, still
		 * send a PHY configuration packet prior to the
		 * bus reset.  The PHY configuration packet might
		 * fail, but 1394-2008 8.4.5.2 explicitly permits
		 * it in this case, so it should be safe to try.
		 */
		new_root_id = local_id;
		/*
		 * We must always send a bus reset if the gap count
		 * is inconsistent, so bypass the 5-reset limit.
		 */
		card->bm_retries = 0;
	} else if (root_device == NULL) {
		/*
		 * Either link_on is false, or we failed to read the
		 * config rom.  In either case, pick another root.
		 */
		new_root_id = local_id;
	} else if (!root_device_is_running) {
		/*
		 * If we haven't probed this device yet, bail out now
		 * and let's try again once that's done.
		 */
		spin_unlock_irq(&card->lock);
		goto out;
	} else if (root_device_is_cmc) {
		/*
		 * We will send out a force root packet for this
		 * node as part of the gap count optimization.
		 */
		new_root_id = root_id;
	} else {
		/*
		 * Current root has an active link layer and we
		 * successfully read the config rom, but it's not
		 * cycle master capable.
		 */
		new_root_id = local_id;
	}

 pick_me:
	/*
	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
	 * the typically much larger 1394b beta repeater delays though.
	 */
	if (!card->beta_repeaters_present &&
	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
		gap_count = gap_count_table[root_node->max_hops];
	else
		gap_count = 63;

	/*
	 * Finally, figure out if we should do a reset or not.  If we have
	 * done less than 5 resets with the same physical topology and we
	 * have either a new root or a new gap count setting, let's do it.
	 */

	if (card->bm_retries++ < 5 &&
	    (card->gap_count != gap_count || new_root_id != root_id))
		do_reset = true;

	spin_unlock_irq(&card->lock);

	if (do_reset) {
		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
			  new_root_id, gap_count);
		fw_send_phy_config(card, new_root_id, generation, gap_count);
		/*
		 * Where possible, use a short bus reset to minimize
		 * disruption to isochronous transfers.  But in the event
		 * of a gap count inconsistency, use a long bus reset.
		 *
		 * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
		 * may set different gap counts after a bus reset.  On a mixed
		 * 1394/1394a bus, a short bus reset can get doubled.  Some
		 * nodes may treat the double reset as one bus reset and others
		 * may treat it as two, causing a gap count inconsistency
		 * again.  Using a long bus reset prevents this.
		 */
		reset_bus(card, card->gap_count != 0);
		/* Will allocate broadcast channel after the reset. */
		goto out;
	}

	if (root_device_is_cmc) {
		/*
		 * Make sure that the cycle master sends cycle start packets.
		 */
		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
				root_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_STATE_SET,
				transaction_data, 4);
		if (rcode == RCODE_GENERATION)
			goto out;
	}

	if (local_id == irm_id)
		allocate_broadcast_channel(card, generation);

 out:
	fw_node_put(root_node);
 out_put_card:
	fw_card_put(card);
}

void fw_card_initialize(struct fw_card *card,
			const struct fw_card_driver *driver,
			struct device *device)
{
	static atomic_t index = ATOMIC_INIT(-1);

	card->index = atomic_inc_return(&index);
	card->driver = driver;
	card->device = device;
	card->current_tlabel = 0;
	card->tlabel_mask = 0;
	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
	card->split_timeout_jiffies =
			DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
	card->color = 0;
	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

	kref_init(&card->kref);
	init_completion(&card->done);
	INIT_LIST_HEAD(&card->transaction_list);
	INIT_LIST_HEAD(&card->phy_receiver_list);
	spin_lock_init(&card->lock);

	card->local_node = NULL;

	INIT_DELAYED_WORK(&card->br_work, br_work);
	INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);

int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
		unsigned int supported_isoc_contexts)
{
	int ret;

	// This workqueue should be:
	//  * != WQ_BH			Sleepable.
	//  * == WQ_UNBOUND		Any core can process data for an isoc context. The
	//				implementation of a unit protocol could occupy the core
	//				for a longer time somehow.
	//  * != WQ_MEM_RECLAIM		Not used for any backend of block device.
	//  * == WQ_FREEZABLE		Isochronous communication is at a regular interval in real
	//				time, thus should be drained if possible at freeze phase.
	//  * == WQ_HIGHPRI		High priority to process semi-realtime timestamped data.
	//  * == WQ_SYSFS		Parameters are available via sysfs.
	//  * max_active == n_it + n_ir	A hardIRQ could notify events for multiple isochronous
	//				contexts if they are scheduled to the same cycle.
	card->isoc_wq = alloc_workqueue("firewire-isoc-card%u",
					WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
					supported_isoc_contexts, card->index);
	if (!card->isoc_wq)
		return -ENOMEM;

	// This workqueue should be:
	//  * != WQ_BH			Sleepable.
	//  * == WQ_UNBOUND		Any core can process data for an asynchronous context.
	//  * == WQ_MEM_RECLAIM		Used for any backend of block device.
	//  * == WQ_FREEZABLE		The target device would not be available when frozen.
	//  * == WQ_HIGHPRI		High priority to process semi-realtime timestamped data.
	//  * == WQ_SYSFS		Parameters are available via sysfs.
	//  * max_active == 4		A hardIRQ could notify events for a pair of request and
	//				response AR/AT contexts.
	card->async_wq = alloc_workqueue("firewire-async-card%u",
					 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
					 4, card->index);
	if (!card->async_wq) {
		ret = -ENOMEM;
		goto err_isoc;
	}

	card->max_receive = max_receive;
	card->link_speed = link_speed;
	card->guid = guid;

	scoped_guard(mutex, &card_mutex) {
		generate_config_rom(card, tmp_config_rom);
		ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
		if (ret < 0)
			goto err_async;

		list_add_tail(&card->link, &card_list);
	}

	return 0;
 err_async:
	destroy_workqueue(card->async_wq);
 err_isoc:
	destroy_workqueue(card->isoc_wq);
	return ret;
}
EXPORT_SYMBOL(fw_card_add);

/*
 * The next few functions implement a dummy driver that is used once a card
 * driver shuts down an fw_card.  This allows the driver to cleanly unload,
 * as all IO to the card will be handled (and failed) by the dummy driver
 * instead of calling into the module.  Only functions for iso context
 * shutdown still need to be provided by the card driver.
 *
 * .read/write_csr() should never be called anymore after the dummy driver
 * was bound since they are only used within request handler context.
 * .set_config_rom() is never called since the card is taken out of card_list
 * before switching to the dummy driver.
 */

static int dummy_read_phy_reg(struct fw_card *card, int address)
{
	return -ENODEV;
}

static int dummy_update_phy_reg(struct fw_card *card, int address,
				int clear_bits, int set_bits)
{
	return -ENODEV;
}

static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	return -ENOENT;
}

static int dummy_enable_phys_dma(struct fw_card *card,
				 int node_id, int generation)
{
	return -ENODEV;
}

static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	return ERR_PTR(-ENODEV);
}

static u32 dummy_read_csr(struct fw_card *card, int csr_offset)
{
	return 0;
}

static void dummy_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
}

static int dummy_start_iso(struct fw_iso_context *ctx,
			   s32 cycle, u32 sync, u32 tags)
{
	return -ENODEV;
}

static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
	return -ENODEV;
}

static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
			   struct fw_iso_buffer *buffer, unsigned long payload)
{
	return -ENODEV;
}

static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
{
}

static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
{
	return -ENODEV;
}

static const struct fw_card_driver dummy_driver_template = {
	.read_phy_reg		= dummy_read_phy_reg,
	.update_phy_reg		= dummy_update_phy_reg,
	.send_request		= dummy_send_request,
	.send_response		= dummy_send_response,
	.cancel_packet		= dummy_cancel_packet,
	.enable_phys_dma	= dummy_enable_phys_dma,
	.read_csr		= dummy_read_csr,
	.write_csr		= dummy_write_csr,
	.allocate_iso_context	= dummy_allocate_iso_context,
	.start_iso		= dummy_start_iso,
	.set_iso_channels	= dummy_set_iso_channels,
	.queue_iso		= dummy_queue_iso,
	.flush_queue_iso	= dummy_flush_queue_iso,
	.flush_iso_completions	= dummy_flush_iso_completions,
};

void fw_card_release(struct kref *kref)
{
	struct fw_card *card = container_of(kref, struct fw_card, kref);

	complete(&card->done);
}
EXPORT_SYMBOL_GPL(fw_card_release);

void fw_core_remove_card(struct fw_card *card)
{
	struct fw_card_driver dummy_driver = dummy_driver_template;

	might_sleep();

	card->driver->update_phy_reg(card, 4,
				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
	fw_schedule_bus_reset(card, false, true);

	scoped_guard(mutex, &card_mutex)
		list_del_init(&card->link);

	/* Switch off most of the card driver interface. */
	dummy_driver.free_iso_context	= card->driver->free_iso_context;
	dummy_driver.stop_iso		= card->driver->stop_iso;
	card->driver = &dummy_driver;
	drain_workqueue(card->isoc_wq);
	drain_workqueue(card->async_wq);

	scoped_guard(spinlock_irqsave, &card->lock)
		fw_destroy_nodes(card);

	/* Wait for all users, especially device workqueue jobs, to finish. */
	fw_card_put(card);
	wait_for_completion(&card->done);

	destroy_workqueue(card->isoc_wq);
	destroy_workqueue(card->async_wq);

	WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);

/**
 * fw_card_read_cycle_time - read the Isochronous Cycle Timer register of 1394 OHCI in the MMIO
 *			     region of the controller card
 * @card: The instance of card for the 1394 OHCI controller.
 * @cycle_time: The pointer to which the value of the register is stored for the read operation.
 *
 * Read the value of the Isochronous Cycle Timer register of 1394 OHCI in the MMIO region of the
 * given controller card. This function accesses the region without any lock primitives or IRQ
 * mask. On success, @cycle_time holds the value in host byte order, formatted like the CYCLE_TIME
 * CSR register of the IEEE 1394 standard.
 *
 * Context: Any context.
 * Return:
 * * 0 - Read successfully.
 * * -ENODEV - The controller is unavailable due to being removed or unbound.
 */
int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time)
{
	if (card->driver->read_csr == dummy_read_csr)
		return -ENODEV;

	// It's possible to switch to the dummy driver between the above and the below. This is
	// the best effort to return -ENODEV.
	*cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
	return 0;
}
EXPORT_SYMBOL_GPL(fw_card_read_cycle_time);
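/*
 * Usage sketch (illustrative only; the field extraction assumes the CYCLE_TIME
 * CSR format: seconds in bits 31..25, cycle count in bits 24..12, cycle offset
 * in bits 11..0):
 *
 *	u32 cycle_time;
 *
 *	if (fw_card_read_cycle_time(card, &cycle_time) == 0) {
 *		unsigned int sec    = (cycle_time >> 25) & 0x7f;
 *		unsigned int cycles = (cycle_time >> 12) & 0x1fff;
 *		unsigned int offset =  cycle_time        & 0xfff;
 *	}
 */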