1 /* 2 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software Foundation, 16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 */ 18 19 #include <linux/bug.h> 20 #include <linux/completion.h> 21 #include <linux/crc-itu-t.h> 22 #include <linux/device.h> 23 #include <linux/errno.h> 24 #include <linux/firewire.h> 25 #include <linux/firewire-constants.h> 26 #include <linux/jiffies.h> 27 #include <linux/kernel.h> 28 #include <linux/kref.h> 29 #include <linux/list.h> 30 #include <linux/module.h> 31 #include <linux/mutex.h> 32 #include <linux/spinlock.h> 33 #include <linux/workqueue.h> 34 35 #include <asm/atomic.h> 36 #include <asm/byteorder.h> 37 38 #include "core.h" 39 40 int fw_compute_block_crc(__be32 *block) 41 { 42 int length; 43 u16 crc; 44 45 length = (be32_to_cpu(block[0]) >> 16) & 0xff; 46 crc = crc_itu_t(0, (u8 *)&block[1], length * 4); 47 *block |= cpu_to_be32(crc); 48 49 return length; 50 } 51 52 static DEFINE_MUTEX(card_mutex); 53 static LIST_HEAD(card_list); 54 55 static LIST_HEAD(descriptor_list); 56 static int descriptor_count; 57 58 static __be32 tmp_config_rom[256]; 59 /* ROM header, bus info block, root dir header, capabilities = 7 quadlets */ 60 static size_t config_rom_length = 1 + 4 + 1 + 1; 61 62 #define BIB_CRC(v) ((v) << 0) 63 #define BIB_CRC_LENGTH(v) ((v) << 16) 64 #define BIB_INFO_LENGTH(v) ((v) << 24) 65 #define BIB_BUS_NAME 0x31333934 /* "1394" */ 66 #define BIB_LINK_SPEED(v) ((v) << 0) 67 #define BIB_GENERATION(v) ((v) << 4) 68 #define BIB_MAX_ROM(v) ((v) << 8) 69 #define BIB_MAX_RECEIVE(v) ((v) << 12) 70 #define BIB_CYC_CLK_ACC(v) ((v) << 16) 71 #define BIB_PMC ((1) << 27) 72 #define BIB_BMC ((1) << 28) 73 #define BIB_ISC ((1) << 29) 74 #define BIB_CMC ((1) << 30) 75 #define BIB_IRMC ((1) << 31) 76 #define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */ 77 78 static void generate_config_rom(struct fw_card *card, __be32 *config_rom) 79 { 80 struct fw_descriptor *desc; 81 int i, j, k, length; 82 83 /* 84 * Initialize contents of config rom buffer. On the OHCI 85 * controller, block reads to the config rom accesses the host 86 * memory, but quadlet read access the hardware bus info block 87 * registers. That's just crack, but it means we should make 88 * sure the contents of bus info block in host memory matches 89 * the version stored in the OHCI registers. 90 */ 91 92 config_rom[0] = cpu_to_be32( 93 BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0)); 94 config_rom[1] = cpu_to_be32(BIB_BUS_NAME); 95 config_rom[2] = cpu_to_be32( 96 BIB_LINK_SPEED(card->link_speed) | 97 BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | 98 BIB_MAX_ROM(2) | 99 BIB_MAX_RECEIVE(card->max_receive) | 100 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC); 101 config_rom[3] = cpu_to_be32(card->guid >> 32); 102 config_rom[4] = cpu_to_be32(card->guid); 103 104 /* Generate root directory. */ 105 config_rom[6] = cpu_to_be32(NODE_CAPABILITIES); 106 i = 7; 107 j = 7 + descriptor_count; 108 109 /* Generate root directory entries for descriptors. */ 110 list_for_each_entry (desc, &descriptor_list, link) { 111 if (desc->immediate > 0) 112 config_rom[i++] = cpu_to_be32(desc->immediate); 113 config_rom[i] = cpu_to_be32(desc->key | (j - i)); 114 i++; 115 j += desc->length; 116 } 117 118 /* Update root directory length. */ 119 config_rom[5] = cpu_to_be32((i - 5 - 1) << 16); 120 121 /* End of root directory, now copy in descriptors. */ 122 list_for_each_entry (desc, &descriptor_list, link) { 123 for (k = 0; k < desc->length; k++) 124 config_rom[i + k] = cpu_to_be32(desc->data[k]); 125 i += desc->length; 126 } 127 128 /* Calculate CRCs for all blocks in the config rom. This 129 * assumes that CRC length and info length are identical for 130 * the bus info block, which is always the case for this 131 * implementation. */ 132 for (i = 0; i < j; i += length + 1) 133 length = fw_compute_block_crc(config_rom + i); 134 135 WARN_ON(j != config_rom_length); 136 } 137 138 static void update_config_roms(void) 139 { 140 struct fw_card *card; 141 142 list_for_each_entry (card, &card_list, link) { 143 generate_config_rom(card, tmp_config_rom); 144 card->driver->set_config_rom(card, tmp_config_rom, 145 config_rom_length); 146 } 147 } 148 149 static size_t required_space(struct fw_descriptor *desc) 150 { 151 /* descriptor + entry into root dir + optional immediate entry */ 152 return desc->length + 1 + (desc->immediate > 0 ? 1 : 0); 153 } 154 155 int fw_core_add_descriptor(struct fw_descriptor *desc) 156 { 157 size_t i; 158 int ret; 159 160 /* 161 * Check descriptor is valid; the length of all blocks in the 162 * descriptor has to add up to exactly the length of the 163 * block. 164 */ 165 i = 0; 166 while (i < desc->length) 167 i += (desc->data[i] >> 16) + 1; 168 169 if (i != desc->length) 170 return -EINVAL; 171 172 mutex_lock(&card_mutex); 173 174 if (config_rom_length + required_space(desc) > 256) { 175 ret = -EBUSY; 176 } else { 177 list_add_tail(&desc->link, &descriptor_list); 178 config_rom_length += required_space(desc); 179 descriptor_count++; 180 if (desc->immediate > 0) 181 descriptor_count++; 182 update_config_roms(); 183 ret = 0; 184 } 185 186 mutex_unlock(&card_mutex); 187 188 return ret; 189 } 190 EXPORT_SYMBOL(fw_core_add_descriptor); 191 192 void fw_core_remove_descriptor(struct fw_descriptor *desc) 193 { 194 mutex_lock(&card_mutex); 195 196 list_del(&desc->link); 197 config_rom_length -= required_space(desc); 198 descriptor_count--; 199 if (desc->immediate > 0) 200 descriptor_count--; 201 update_config_roms(); 202 203 mutex_unlock(&card_mutex); 204 } 205 EXPORT_SYMBOL(fw_core_remove_descriptor); 206 207 static int reset_bus(struct fw_card *card, bool short_reset) 208 { 209 int reg = short_reset ? 5 : 1; 210 int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; 211 212 return card->driver->update_phy_reg(card, reg, 0, bit); 213 } 214 215 void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) 216 { 217 /* We don't try hard to sort out requests of long vs. short resets. */ 218 card->br_short = short_reset; 219 220 /* Use an arbitrary short delay to combine multiple reset requests. */ 221 fw_card_get(card); 222 if (!schedule_delayed_work(&card->br_work, 223 delayed ? DIV_ROUND_UP(HZ, 100) : 0)) 224 fw_card_put(card); 225 } 226 EXPORT_SYMBOL(fw_schedule_bus_reset); 227 228 static void br_work(struct work_struct *work) 229 { 230 struct fw_card *card = container_of(work, struct fw_card, br_work.work); 231 232 /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */ 233 if (card->reset_jiffies != 0 && 234 time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) { 235 if (!schedule_delayed_work(&card->br_work, 2 * HZ)) 236 fw_card_put(card); 237 return; 238 } 239 240 fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation, 241 FW_PHY_CONFIG_CURRENT_GAP_COUNT); 242 reset_bus(card, card->br_short); 243 fw_card_put(card); 244 } 245 246 static void allocate_broadcast_channel(struct fw_card *card, int generation) 247 { 248 int channel, bandwidth = 0; 249 250 if (!card->broadcast_channel_allocated) { 251 fw_iso_resource_manage(card, generation, 1ULL << 31, 252 &channel, &bandwidth, true, 253 card->bm_transaction_data); 254 if (channel != 31) { 255 fw_notify("failed to allocate broadcast channel\n"); 256 return; 257 } 258 card->broadcast_channel_allocated = true; 259 } 260 261 device_for_each_child(card->device, (void *)(long)generation, 262 fw_device_set_broadcast_channel); 263 } 264 265 static const char gap_count_table[] = { 266 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 267 }; 268 269 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) 270 { 271 fw_card_get(card); 272 if (!schedule_delayed_work(&card->bm_work, delay)) 273 fw_card_put(card); 274 } 275 276 static void bm_work(struct work_struct *work) 277 { 278 struct fw_card *card = container_of(work, struct fw_card, bm_work.work); 279 struct fw_device *root_device, *irm_device; 280 struct fw_node *root_node; 281 int root_id, new_root_id, irm_id, bm_id, local_id; 282 int gap_count, generation, grace, rcode; 283 bool do_reset = false; 284 bool root_device_is_running; 285 bool root_device_is_cmc; 286 bool irm_is_1394_1995_only; 287 288 spin_lock_irq(&card->lock); 289 290 if (card->local_node == NULL) { 291 spin_unlock_irq(&card->lock); 292 goto out_put_card; 293 } 294 295 generation = card->generation; 296 297 root_node = card->root_node; 298 fw_node_get(root_node); 299 root_device = root_node->data; 300 root_device_is_running = root_device && 301 atomic_read(&root_device->state) == FW_DEVICE_RUNNING; 302 root_device_is_cmc = root_device && root_device->cmc; 303 304 irm_device = card->irm_node->data; 305 irm_is_1394_1995_only = irm_device && irm_device->config_rom && 306 (irm_device->config_rom[2] & 0x000000f0) == 0; 307 308 root_id = root_node->node_id; 309 irm_id = card->irm_node->node_id; 310 local_id = card->local_node->node_id; 311 312 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); 313 314 if ((is_next_generation(generation, card->bm_generation) && 315 !card->bm_abdicate) || 316 (card->bm_generation != generation && grace)) { 317 /* 318 * This first step is to figure out who is IRM and 319 * then try to become bus manager. If the IRM is not 320 * well defined (e.g. does not have an active link 321 * layer or does not responds to our lock request, we 322 * will have to do a little vigilante bus management. 323 * In that case, we do a goto into the gap count logic 324 * so that when we do the reset, we still optimize the 325 * gap count. That could well save a reset in the 326 * next generation. 327 */ 328 329 if (!card->irm_node->link_on) { 330 new_root_id = local_id; 331 fw_notify("%s, making local node (%02x) root.\n", 332 "IRM has link off", new_root_id); 333 goto pick_me; 334 } 335 336 if (irm_is_1394_1995_only) { 337 new_root_id = local_id; 338 fw_notify("%s, making local node (%02x) root.\n", 339 "IRM is not 1394a compliant", new_root_id); 340 goto pick_me; 341 } 342 343 card->bm_transaction_data[0] = cpu_to_be32(0x3f); 344 card->bm_transaction_data[1] = cpu_to_be32(local_id); 345 346 spin_unlock_irq(&card->lock); 347 348 rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, 349 irm_id, generation, SCODE_100, 350 CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, 351 card->bm_transaction_data, 8); 352 353 if (rcode == RCODE_GENERATION) 354 /* Another bus reset, BM work has been rescheduled. */ 355 goto out; 356 357 bm_id = be32_to_cpu(card->bm_transaction_data[0]); 358 359 spin_lock_irq(&card->lock); 360 if (rcode == RCODE_COMPLETE && generation == card->generation) 361 card->bm_node_id = 362 bm_id == 0x3f ? local_id : 0xffc0 | bm_id; 363 spin_unlock_irq(&card->lock); 364 365 if (rcode == RCODE_COMPLETE && bm_id != 0x3f) { 366 /* Somebody else is BM. Only act as IRM. */ 367 if (local_id == irm_id) 368 allocate_broadcast_channel(card, generation); 369 370 goto out; 371 } 372 373 if (rcode == RCODE_SEND_ERROR) { 374 /* 375 * We have been unable to send the lock request due to 376 * some local problem. Let's try again later and hope 377 * that the problem has gone away by then. 378 */ 379 fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); 380 goto out; 381 } 382 383 spin_lock_irq(&card->lock); 384 385 if (rcode != RCODE_COMPLETE) { 386 /* 387 * The lock request failed, maybe the IRM 388 * isn't really IRM capable after all. Let's 389 * do a bus reset and pick the local node as 390 * root, and thus, IRM. 391 */ 392 new_root_id = local_id; 393 fw_notify("%s, making local node (%02x) root.\n", 394 "BM lock failed", new_root_id); 395 goto pick_me; 396 } 397 } else if (card->bm_generation != generation) { 398 /* 399 * We weren't BM in the last generation, and the last 400 * bus reset is less than 125ms ago. Reschedule this job. 401 */ 402 spin_unlock_irq(&card->lock); 403 fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); 404 goto out; 405 } 406 407 /* 408 * We're bus manager for this generation, so next step is to 409 * make sure we have an active cycle master and do gap count 410 * optimization. 411 */ 412 card->bm_generation = generation; 413 414 if (root_device == NULL) { 415 /* 416 * Either link_on is false, or we failed to read the 417 * config rom. In either case, pick another root. 418 */ 419 new_root_id = local_id; 420 } else if (!root_device_is_running) { 421 /* 422 * If we haven't probed this device yet, bail out now 423 * and let's try again once that's done. 424 */ 425 spin_unlock_irq(&card->lock); 426 goto out; 427 } else if (root_device_is_cmc) { 428 /* 429 * We will send out a force root packet for this 430 * node as part of the gap count optimization. 431 */ 432 new_root_id = root_id; 433 } else { 434 /* 435 * Current root has an active link layer and we 436 * successfully read the config rom, but it's not 437 * cycle master capable. 438 */ 439 new_root_id = local_id; 440 } 441 442 pick_me: 443 /* 444 * Pick a gap count from 1394a table E-1. The table doesn't cover 445 * the typically much larger 1394b beta repeater delays though. 446 */ 447 if (!card->beta_repeaters_present && 448 root_node->max_hops < ARRAY_SIZE(gap_count_table)) 449 gap_count = gap_count_table[root_node->max_hops]; 450 else 451 gap_count = 63; 452 453 /* 454 * Finally, figure out if we should do a reset or not. If we have 455 * done less than 5 resets with the same physical topology and we 456 * have either a new root or a new gap count setting, let's do it. 457 */ 458 459 if (card->bm_retries++ < 5 && 460 (card->gap_count != gap_count || new_root_id != root_id)) 461 do_reset = true; 462 463 spin_unlock_irq(&card->lock); 464 465 if (do_reset) { 466 fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", 467 card->index, new_root_id, gap_count); 468 fw_send_phy_config(card, new_root_id, generation, gap_count); 469 reset_bus(card, true); 470 /* Will allocate broadcast channel after the reset. */ 471 goto out; 472 } 473 474 if (root_device_is_cmc) { 475 /* 476 * Make sure that the cycle master sends cycle start packets. 477 */ 478 card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR); 479 rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, 480 root_id, generation, SCODE_100, 481 CSR_REGISTER_BASE + CSR_STATE_SET, 482 card->bm_transaction_data, 4); 483 if (rcode == RCODE_GENERATION) 484 goto out; 485 } 486 487 if (local_id == irm_id) 488 allocate_broadcast_channel(card, generation); 489 490 out: 491 fw_node_put(root_node); 492 out_put_card: 493 fw_card_put(card); 494 } 495 496 void fw_card_initialize(struct fw_card *card, 497 const struct fw_card_driver *driver, 498 struct device *device) 499 { 500 static atomic_t index = ATOMIC_INIT(-1); 501 502 card->index = atomic_inc_return(&index); 503 card->driver = driver; 504 card->device = device; 505 card->current_tlabel = 0; 506 card->tlabel_mask = 0; 507 card->split_timeout_hi = 0; 508 card->split_timeout_lo = 800 << 19; 509 card->split_timeout_cycles = 800; 510 card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10); 511 card->color = 0; 512 card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; 513 514 kref_init(&card->kref); 515 init_completion(&card->done); 516 INIT_LIST_HEAD(&card->transaction_list); 517 INIT_LIST_HEAD(&card->phy_receiver_list); 518 spin_lock_init(&card->lock); 519 520 card->local_node = NULL; 521 522 INIT_DELAYED_WORK(&card->br_work, br_work); 523 INIT_DELAYED_WORK(&card->bm_work, bm_work); 524 } 525 EXPORT_SYMBOL(fw_card_initialize); 526 527 int fw_card_add(struct fw_card *card, 528 u32 max_receive, u32 link_speed, u64 guid) 529 { 530 int ret; 531 532 card->max_receive = max_receive; 533 card->link_speed = link_speed; 534 card->guid = guid; 535 536 mutex_lock(&card_mutex); 537 538 generate_config_rom(card, tmp_config_rom); 539 ret = card->driver->enable(card, tmp_config_rom, config_rom_length); 540 if (ret == 0) 541 list_add_tail(&card->link, &card_list); 542 543 mutex_unlock(&card_mutex); 544 545 return ret; 546 } 547 EXPORT_SYMBOL(fw_card_add); 548 549 /* 550 * The next few functions implement a dummy driver that is used once a card 551 * driver shuts down an fw_card. This allows the driver to cleanly unload, 552 * as all IO to the card will be handled (and failed) by the dummy driver 553 * instead of calling into the module. Only functions for iso context 554 * shutdown still need to be provided by the card driver. 555 * 556 * .read/write_csr() should never be called anymore after the dummy driver 557 * was bound since they are only used within request handler context. 558 * .set_config_rom() is never called since the card is taken out of card_list 559 * before switching to the dummy driver. 560 */ 561 562 static int dummy_read_phy_reg(struct fw_card *card, int address) 563 { 564 return -ENODEV; 565 } 566 567 static int dummy_update_phy_reg(struct fw_card *card, int address, 568 int clear_bits, int set_bits) 569 { 570 return -ENODEV; 571 } 572 573 static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) 574 { 575 packet->callback(packet, card, RCODE_CANCELLED); 576 } 577 578 static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) 579 { 580 packet->callback(packet, card, RCODE_CANCELLED); 581 } 582 583 static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) 584 { 585 return -ENOENT; 586 } 587 588 static int dummy_enable_phys_dma(struct fw_card *card, 589 int node_id, int generation) 590 { 591 return -ENODEV; 592 } 593 594 static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card, 595 int type, int channel, size_t header_size) 596 { 597 return ERR_PTR(-ENODEV); 598 } 599 600 static int dummy_start_iso(struct fw_iso_context *ctx, 601 s32 cycle, u32 sync, u32 tags) 602 { 603 return -ENODEV; 604 } 605 606 static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels) 607 { 608 return -ENODEV; 609 } 610 611 static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p, 612 struct fw_iso_buffer *buffer, unsigned long payload) 613 { 614 return -ENODEV; 615 } 616 617 static const struct fw_card_driver dummy_driver_template = { 618 .read_phy_reg = dummy_read_phy_reg, 619 .update_phy_reg = dummy_update_phy_reg, 620 .send_request = dummy_send_request, 621 .send_response = dummy_send_response, 622 .cancel_packet = dummy_cancel_packet, 623 .enable_phys_dma = dummy_enable_phys_dma, 624 .allocate_iso_context = dummy_allocate_iso_context, 625 .start_iso = dummy_start_iso, 626 .set_iso_channels = dummy_set_iso_channels, 627 .queue_iso = dummy_queue_iso, 628 }; 629 630 void fw_card_release(struct kref *kref) 631 { 632 struct fw_card *card = container_of(kref, struct fw_card, kref); 633 634 complete(&card->done); 635 } 636 637 void fw_core_remove_card(struct fw_card *card) 638 { 639 struct fw_card_driver dummy_driver = dummy_driver_template; 640 641 card->driver->update_phy_reg(card, 4, 642 PHY_LINK_ACTIVE | PHY_CONTENDER, 0); 643 fw_schedule_bus_reset(card, false, true); 644 645 mutex_lock(&card_mutex); 646 list_del_init(&card->link); 647 mutex_unlock(&card_mutex); 648 649 /* Switch off most of the card driver interface. */ 650 dummy_driver.free_iso_context = card->driver->free_iso_context; 651 dummy_driver.stop_iso = card->driver->stop_iso; 652 card->driver = &dummy_driver; 653 654 fw_destroy_nodes(card); 655 656 /* Wait for all users, especially device workqueue jobs, to finish. */ 657 fw_card_put(card); 658 wait_for_completion(&card->done); 659 660 WARN_ON(!list_empty(&card->transaction_list)); 661 } 662 EXPORT_SYMBOL(fw_core_remove_card); 663