xref: /linux/drivers/firewire/core-card.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
4  */
5 
6 #include <linux/bug.h>
7 #include <linux/completion.h>
8 #include <linux/crc-itu-t.h>
9 #include <linux/device.h>
10 #include <linux/errno.h>
11 #include <linux/firewire.h>
12 #include <linux/firewire-constants.h>
13 #include <linux/jiffies.h>
14 #include <linux/kernel.h>
15 #include <linux/kref.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/spinlock.h>
20 #include <linux/workqueue.h>
21 
22 #include <linux/atomic.h>
23 #include <asm/byteorder.h>
24 
25 #include "core.h"
26 #include <trace/events/firewire.h>
27 
28 #define define_fw_printk_level(func, kern_level)		\
29 void func(const struct fw_card *card, const char *fmt, ...)	\
30 {								\
31 	struct va_format vaf;					\
32 	va_list args;						\
33 								\
34 	va_start(args, fmt);					\
35 	vaf.fmt = fmt;						\
36 	vaf.va = &args;						\
37 	printk(kern_level KBUILD_MODNAME " %s: %pV",		\
38 	       dev_name(card->device), &vaf);			\
39 	va_end(args);						\
40 }
41 define_fw_printk_level(fw_err, KERN_ERR);
42 define_fw_printk_level(fw_notice, KERN_NOTICE);
43 
44 int fw_compute_block_crc(__be32 *block)
45 {
46 	int length;
47 	u16 crc;
48 
49 	length = (be32_to_cpu(block[0]) >> 16) & 0xff;
50 	crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
51 	*block |= cpu_to_be32(crc);
52 
53 	return length;
54 }
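/*
 * A worked example (hypothetical values): a directory block with three entries
 * uses the header quadlet 0x00030000.  Bits 16..23 give the number of quadlets
 * that follow and are covered by the CRC, so fw_compute_block_crc() feeds the
 * next 3 * 4 bytes to crc_itu_t() and ORs the resulting CRC-16 into the low
 * 16 bits of the header, which generate_config_rom() left as zero.
 */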
55 
56 static DEFINE_MUTEX(card_mutex);
57 static LIST_HEAD(card_list);
58 
59 static LIST_HEAD(descriptor_list);
60 static int descriptor_count;
61 
62 static __be32 tmp_config_rom[256];
63 /* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
64 static size_t config_rom_length = 1 + 4 + 1 + 1;
65 
66 #define BIB_CRC(v)		((v) <<  0)
67 #define BIB_CRC_LENGTH(v)	((v) << 16)
68 #define BIB_INFO_LENGTH(v)	((v) << 24)
69 #define BIB_BUS_NAME		0x31333934 /* "1394" */
70 #define BIB_LINK_SPEED(v)	((v) <<  0)
71 #define BIB_GENERATION(v)	((v) <<  4)
72 #define BIB_MAX_ROM(v)		((v) <<  8)
73 #define BIB_MAX_RECEIVE(v)	((v) << 12)
74 #define BIB_CYC_CLK_ACC(v)	((v) << 16)
75 #define BIB_PMC			((1) << 27)
76 #define BIB_BMC			((1) << 28)
77 #define BIB_ISC			((1) << 29)
78 #define BIB_CMC			((1) << 30)
79 #define BIB_IRMC		((1) << 31)
80 #define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
81 
82 /*
83  * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
84  * but we have to make it longer because there are many devices whose firmware
85  * is just too slow for that.
86  */
87 #define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)
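/*
 * In other words, assuming the usual 8000 isochronous cycles per second
 * (125 us each): the specified default of 800 cycles is 100 ms, while
 * 2 * 8000 cycles amounts to a 2 second split timeout.
 */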
88 
89 static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
90 {
91 	struct fw_descriptor *desc;
92 	int i, j, k, length;
93 
94 	/*
95 	 * Initialize contents of config rom buffer.  On the OHCI
96 	 * controller, block reads to the config rom accesses the host
97 	 * memory, but quadlet read access the hardware bus info block
98 	 * registers.  That's just crack, but it means we should make
99 	 * sure the contents of bus info block in host memory matches
100 	 * the version stored in the OHCI registers.
101 	 */
102 
103 	config_rom[0] = cpu_to_be32(
104 		BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
105 	config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
106 	config_rom[2] = cpu_to_be32(
107 		BIB_LINK_SPEED(card->link_speed) |
108 		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
109 		BIB_MAX_ROM(2) |
110 		BIB_MAX_RECEIVE(card->max_receive) |
111 		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
112 	config_rom[3] = cpu_to_be32(card->guid >> 32);
113 	config_rom[4] = cpu_to_be32(card->guid);
114 
115 	/* Generate root directory. */
116 	config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
117 	i = 7;
118 	j = 7 + descriptor_count;
119 
120 	/* Generate root directory entries for descriptors. */
121 	list_for_each_entry (desc, &descriptor_list, link) {
122 		if (desc->immediate > 0)
123 			config_rom[i++] = cpu_to_be32(desc->immediate);
124 		config_rom[i] = cpu_to_be32(desc->key | (j - i));
125 		i++;
126 		j += desc->length;
127 	}
128 
129 	/* Update root directory length. */
130 	config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);
131 
132 	/* End of root directory, now copy in descriptors. */
133 	list_for_each_entry (desc, &descriptor_list, link) {
134 		for (k = 0; k < desc->length; k++)
135 			config_rom[i + k] = cpu_to_be32(desc->data[k]);
136 		i += desc->length;
137 	}
138 
139 	/* Calculate CRCs for all blocks in the config rom.  This
140 	 * assumes that CRC length and info length are identical for
141 	 * the bus info block, which is always the case for this
142 	 * implementation. */
143 	for (i = 0; i < j; i += length + 1)
144 		length = fw_compute_block_crc(config_rom + i);
145 
146 	WARN_ON(j != config_rom_length);
147 }
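/*
 * For reference, the layout built above (in quadlets):
 *
 *   [0]      bus info block header (info_length, crc_length, CRC)
 *   [1]      bus name "1394"
 *   [2]      bus options (link speed, generation, max_rom, max_rec, BMC/ISC/CMC/IRMC)
 *   [3..4]   EUI-64 (GUID)
 *   [5]      root directory header (length, CRC)
 *   [6]      node capabilities entry
 *   [7..]    one or two root directory entries per registered descriptor,
 *            followed by the descriptor data blocks themselves.
 */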
148 
149 static void update_config_roms(void)
150 {
151 	struct fw_card *card;
152 
153 	list_for_each_entry (card, &card_list, link) {
154 		generate_config_rom(card, tmp_config_rom);
155 		card->driver->set_config_rom(card, tmp_config_rom,
156 					     config_rom_length);
157 	}
158 }
159 
160 static size_t required_space(struct fw_descriptor *desc)
161 {
162 	/* descriptor + entry into root dir + optional immediate entry */
163 	return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
164 }
165 
166 int fw_core_add_descriptor(struct fw_descriptor *desc)
167 {
168 	size_t i;
169 
170 	/*
171 	 * Check descriptor is valid; the length of all blocks in the
172 	 * descriptor has to add up to exactly the length of the
173 	 * block.
174 	 */
175 	i = 0;
176 	while (i < desc->length)
177 		i += (desc->data[i] >> 16) + 1;
178 
179 	if (i != desc->length)
180 		return -EINVAL;
181 
182 	guard(mutex)(&card_mutex);
183 
184 	if (config_rom_length + required_space(desc) > 256)
185 		return -EBUSY;
186 
187 	list_add_tail(&desc->link, &descriptor_list);
188 	config_rom_length += required_space(desc);
189 	descriptor_count++;
190 	if (desc->immediate > 0)
191 		descriptor_count++;
192 	update_config_roms();
193 
194 	return 0;
195 }
196 EXPORT_SYMBOL(fw_core_add_descriptor);
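/*
 * A minimal usage sketch with hypothetical values: a unit directory that a
 * protocol driver might register.  The upper 16 bits of each block's first
 * quadlet hold the number of quadlets that follow, so the per-block lengths
 * must add up to .length exactly (here 1 + 2 = 3 quadlets):
 *
 *	static const u32 example_unit_dir[] = {
 *		0x00020000,	// hypothetical block header: 2 quadlets follow
 *		0x12345678,	// hypothetical Specifier_ID entry
 *		0x13000001,	// hypothetical Version entry
 *	};
 *	static struct fw_descriptor example_desc = {
 *		.length	= ARRAY_SIZE(example_unit_dir),
 *		.key	= (CSR_DIRECTORY | CSR_UNIT) << 24,
 *		.data	= example_unit_dir,
 *	};
 *
 *	err = fw_core_add_descriptor(&example_desc);
 */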
197 
198 void fw_core_remove_descriptor(struct fw_descriptor *desc)
199 {
200 	guard(mutex)(&card_mutex);
201 
202 	list_del(&desc->link);
203 	config_rom_length -= required_space(desc);
204 	descriptor_count--;
205 	if (desc->immediate > 0)
206 		descriptor_count--;
207 	update_config_roms();
208 }
209 EXPORT_SYMBOL(fw_core_remove_descriptor);
210 
211 static int reset_bus(struct fw_card *card, bool short_reset)
212 {
213 	int reg = short_reset ? 5 : 1;
214 	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
215 
216 	trace_bus_reset_initiate(card->index, card->generation, short_reset);
217 
218 	return card->driver->update_phy_reg(card, reg, 0, bit);
219 }
220 
221 void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
222 {
223 	trace_bus_reset_schedule(card->index, card->generation, short_reset);
224 
225 	/* We don't try hard to sort out requests of long vs. short resets. */
226 	card->br_short = short_reset;
227 
228 	/* Use an arbitrary short delay to combine multiple reset requests. */
229 	fw_card_get(card);
230 	if (!queue_delayed_work(fw_workqueue, &card->br_work, delayed ? msecs_to_jiffies(10) : 0))
231 		fw_card_put(card);
232 }
233 EXPORT_SYMBOL(fw_schedule_bus_reset);
234 
235 static void br_work(struct work_struct *work)
236 {
237 	struct fw_card *card = from_work(card, work, br_work.work);
238 
239 	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
240 	if (card->reset_jiffies != 0 &&
241 	    time_is_after_jiffies64(card->reset_jiffies + secs_to_jiffies(2))) {
242 		trace_bus_reset_postpone(card->index, card->generation, card->br_short);
243 
244 		if (!queue_delayed_work(fw_workqueue, &card->br_work, secs_to_jiffies(2)))
245 			fw_card_put(card);
246 		return;
247 	}
248 
249 	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
250 			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
251 	reset_bus(card, card->br_short);
252 	fw_card_put(card);
253 }
254 
255 static void allocate_broadcast_channel(struct fw_card *card, int generation)
256 {
257 	int channel, bandwidth = 0;
258 
259 	if (!card->broadcast_channel_allocated) {
260 		fw_iso_resource_manage(card, generation, 1ULL << 31,
261 				       &channel, &bandwidth, true);
262 		if (channel != 31) {
263 			fw_notice(card, "failed to allocate broadcast channel\n");
264 			return;
265 		}
266 		card->broadcast_channel_allocated = true;
267 	}
268 
269 	device_for_each_child(card->device, (void *)(long)generation,
270 			      fw_device_set_broadcast_channel);
271 }
272 
273 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
274 {
275 	fw_card_get(card);
276 	if (!schedule_delayed_work(&card->bm_work, delay))
277 		fw_card_put(card);
278 }
279 
280 enum bm_contention_outcome {
281 	// The bus management contention window has not expired yet.
282 	BM_CONTENTION_OUTCOME_WITHIN_WINDOW = 0,
283 	// The IRM node has link off.
284 	BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF,
285 	// The IRM node complies with IEEE 1394:1995 only.
286 	BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY,
287 	// Another bus reset, BM work has been rescheduled.
288 	BM_CONTENTION_OUTCOME_AT_NEW_GENERATION,
289 	// We have been unable to send the lock request to the IRM node due to some local problem.
290 	BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION,
291 	// The lock request failed, maybe the IRM isn't really IRM capable after all.
292 	BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM,
293 	// Somebody else is BM.
294 	BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM,
295 	// The local node succeeds after contending for bus manager.
296 	BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM,
297 };
298 
299 static enum bm_contention_outcome contend_for_bm(struct fw_card *card)
300 __must_hold(&card->lock)
301 {
302 	int generation = card->generation;
303 	int local_id = card->local_node->node_id;
304 	__be32 data[2] = {
305 		cpu_to_be32(BUS_MANAGER_ID_NOT_REGISTERED),
306 		cpu_to_be32(local_id),
307 	};
308 	bool grace = time_is_before_jiffies64(card->reset_jiffies + msecs_to_jiffies(125));
309 	struct fw_node *irm_node;
310 	struct fw_device *irm_device;
311 	int irm_node_id, irm_device_quirks = 0;
312 	int rcode;
313 
314 	lockdep_assert_held(&card->lock);
315 
316 	if (!grace) {
317 		if (!is_next_generation(generation, card->bm_generation) || card->bm_abdicate)
318 			return BM_CONTENTION_OUTCOME_WITHIN_WINDOW;
319 	}
320 
321 	irm_node = card->irm_node;
322 	if (!irm_node->link_on) {
323 		fw_notice(card, "IRM has link off, making local node (%02x) root\n", local_id);
324 		return BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF;
325 	}
326 
327 	// NOTE: It is likely that quirk detection for the IRM device has not been done yet.
328 	irm_device = fw_node_get_device(irm_node);
329 	if (irm_device)
330 		irm_device_quirks = READ_ONCE(irm_device->quirks);
331 	if ((irm_device_quirks & FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY) &&
332 	    !(irm_device_quirks & FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER)) {
333 		fw_notice(card, "IRM is not 1394a compliant, making local node (%02x) root\n",
334 			  local_id);
335 		return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
336 	}
337 
338 	irm_node_id = irm_node->node_id;
339 
340 	spin_unlock_irq(&card->lock);
341 
342 	rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_node_id, generation,
343 				   SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, data,
344 				   sizeof(data));
345 
346 	spin_lock_irq(&card->lock);
347 
348 	switch (rcode) {
349 	case RCODE_GENERATION:
350 		return BM_CONTENTION_OUTCOME_AT_NEW_GENERATION;
351 	case RCODE_SEND_ERROR:
352 		return BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION;
353 	case RCODE_COMPLETE:
354 	{
355 		int bm_id = be32_to_cpu(data[0]);
356 
357 		// Used by cdev layer for "struct fw_cdev_event_bus_reset".
358 		if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED)
359 			card->bm_node_id = 0xffc0 | bm_id;
360 		else
361 			card->bm_node_id = local_id;
362 
363 		if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED)
364 			return BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM;
365 		else
366 			return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
367 	}
368 	default:
369 		if (!(irm_device_quirks & FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER)) {
370 			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
371 				  fw_rcode_string(rcode), local_id);
372 			return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
373 		} else {
374 			return BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM;
375 		}
376 	}
377 }
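/*
 * Note on the lock transaction above: BUS_MANAGER_ID is a compare-and-swap
 * register, so data[0] carries the expected old value (not registered) and
 * data[1] the new value (the local node ID).  On RCODE_COMPLETE, data[0]
 * holds the previous register content; if it still reads "not registered",
 * the swap took effect and the local node has become bus manager.
 */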
378 
379 DEFINE_FREE(node_unref, struct fw_node *, if (_T) fw_node_put(_T))
380 DEFINE_FREE(card_unref, struct fw_card *, if (_T) fw_card_put(_T))
381 
382 static void bm_work(struct work_struct *work)
383 {
384 	static const char gap_count_table[] = {
385 		63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
386 	};
387 	struct fw_card *card __free(card_unref) = from_work(card, work, bm_work.work);
388 	struct fw_node *root_node __free(node_unref) = NULL;
389 	int root_id, new_root_id, irm_id, local_id;
390 	int expected_gap_count, generation;
391 	bool stand_for_root = false;
392 
393 	spin_lock_irq(&card->lock);
394 
395 	if (card->local_node == NULL) {
396 		spin_unlock_irq(&card->lock);
397 		return;
398 	}
399 
400 	generation = card->generation;
401 
402 	root_node = fw_node_get(card->root_node);
403 
404 	root_id  = root_node->node_id;
405 	irm_id   = card->irm_node->node_id;
406 	local_id = card->local_node->node_id;
407 
408 	if (card->bm_generation != generation) {
409 		enum bm_contention_outcome result = contend_for_bm(card);
410 
411 		switch (result) {
412 		case BM_CONTENTION_OUTCOME_WITHIN_WINDOW:
413 			spin_unlock_irq(&card->lock);
414 			fw_schedule_bm_work(card, msecs_to_jiffies(125));
415 			return;
416 		case BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF:
417 			stand_for_root = true;
418 			break;
419 		case BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY:
420 			stand_for_root = true;
421 			break;
422 		case BM_CONTENTION_OUTCOME_AT_NEW_GENERATION:
423 			// BM work has been rescheduled.
424 			spin_unlock_irq(&card->lock);
425 			return;
426 		case BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION:
427 			// Let's try again later and hope that the local problem has gone away by
428 			// then.
429 			spin_unlock_irq(&card->lock);
430 			fw_schedule_bm_work(card, msecs_to_jiffies(125));
431 			return;
432 		case BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM:
433 			// Let's do a bus reset and pick the local node as root, and thus, IRM.
434 			stand_for_root = true;
435 			break;
436 		case BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM:
437 			if (local_id == irm_id) {
438 				// Only acts as IRM.
439 				spin_unlock_irq(&card->lock);
440 				allocate_broadcast_channel(card, generation);
441 				spin_lock_irq(&card->lock);
442 			}
443 			fallthrough;
444 		case BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM:
445 		default:
446 			card->bm_generation = generation;
447 			break;
448 		}
449 	}
450 
451 	// We're bus manager for this generation, so the next step is to make sure we have an
452 	// active cycle master and to do gap count optimization.
453 	if (!stand_for_root) {
454 		if (card->gap_count == GAP_COUNT_MISMATCHED) {
455 			// If self IDs have inconsistent gap counts, do a
456 			// bus reset ASAP. The config rom read might never
457 			// complete, so don't wait for it. However, still
458 			// send a PHY configuration packet prior to the
459 			// bus reset. The PHY configuration packet might
460 			// fail, but 1394-2008 8.4.5.2 explicitly permits
461 			// it in this case, so it should be safe to try.
462 			stand_for_root = true;
463 
464 			// We must always send a bus reset if the gap count
465 			// is inconsistent, so bypass the 5-reset limit.
466 			card->bm_retries = 0;
467 		} else {
468 			// Now investigate root node.
469 			struct fw_device *root_device = fw_node_get_device(root_node);
470 
471 			if (root_device == NULL) {
472 				// Either link_on is false, or we failed to read the
473 				// config rom.  In either case, pick another root.
474 				stand_for_root = true;
475 			} else {
476 				bool root_device_is_running =
477 					atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
478 
479 				if (!root_device_is_running) {
480 					// If we haven't probed this device yet, bail out now
481 					// and let's try again once that's done.
482 					spin_unlock_irq(&card->lock);
483 					return;
484 				} else if (!root_device->cmc) {
485 					// Current root has an active link layer and we
486 					// successfully read the config rom, but it's not
487 					// cycle master capable.
488 					stand_for_root = true;
489 				}
490 			}
491 		}
492 	}
493 
494 	if (stand_for_root) {
495 		new_root_id = local_id;
496 	} else {
497 		// We will send out a force root packet for this node as part of the
498 		// gap count optimization.
499 		new_root_id = root_id;
500 	}
501 
502 	/*
503 	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
504 	 * the typically much larger 1394b beta repeater delays though.
505 	 */
506 	if (!card->beta_repeaters_present &&
507 	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
508 		expected_gap_count = gap_count_table[root_node->max_hops];
509 	else
510 		expected_gap_count = 63;
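	/*
	 * For instance, with a daisy chain of four nodes (max_hops == 3) and
	 * no beta repeaters, the table above yields a gap count of 8; any
	 * larger or unknown topology falls back to the maximum of 63.
	 */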
511 
512 	// Finally, figure out if we should do a reset or not. If we have done less than 5 resets
513 	// with the same physical topology and we have either a new root or a new gap count
514 	// setting, let's do it.
515 	if (card->bm_retries++ < 5 && (card->gap_count != expected_gap_count || new_root_id != root_id)) {
516 		int card_gap_count = card->gap_count;
517 
518 		spin_unlock_irq(&card->lock);
519 
520 		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
521 			  new_root_id, expected_gap_count);
522 		fw_send_phy_config(card, new_root_id, generation, expected_gap_count);
523 		/*
524 		 * Where possible, use a short bus reset to minimize
525 		 * disruption to isochronous transfers. But in the event
526 		 * of a gap count inconsistency, use a long bus reset.
527 		 *
528 		 * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
529 		 * may set different gap counts after a bus reset. On a mixed
530 		 * 1394/1394a bus, a short bus reset can get doubled. Some
531 		 * nodes may treat the double reset as one bus reset and others
532 		 * may treat it as two, causing a gap count inconsistency
533 		 * again. Using a long bus reset prevents this.
534 		 */
535 		reset_bus(card, card_gap_count != 0);
536 		/* Will allocate broadcast channel after the reset. */
537 	} else {
538 		struct fw_device *root_device = fw_node_get_device(root_node);
539 
540 		spin_unlock_irq(&card->lock);
541 
542 		if (root_device && root_device->cmc) {
543 			// Make sure that the cycle master sends cycle start packets.
544 			__be32 data = cpu_to_be32(CSR_STATE_BIT_CMSTR);
545 			int rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
546 					root_id, generation, SCODE_100,
547 					CSR_REGISTER_BASE + CSR_STATE_SET,
548 					&data, sizeof(data));
549 			if (rcode == RCODE_GENERATION)
550 				return;
551 		}
552 
553 		if (local_id == irm_id)
554 			allocate_broadcast_channel(card, generation);
555 	}
556 }
557 
558 void fw_card_initialize(struct fw_card *card,
559 			const struct fw_card_driver *driver,
560 			struct device *device)
561 {
562 	static atomic_t index = ATOMIC_INIT(-1);
563 
564 	card->index = atomic_inc_return(&index);
565 	card->driver = driver;
566 	card->device = device;
567 
568 	card->transactions.current_tlabel = 0;
569 	card->transactions.tlabel_mask = 0;
570 	INIT_LIST_HEAD(&card->transactions.list);
571 	spin_lock_init(&card->transactions.lock);
572 
573 	spin_lock_init(&card->topology_map.lock);
574 
575 	card->split_timeout.hi = DEFAULT_SPLIT_TIMEOUT / 8000;
576 	card->split_timeout.lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
577 	card->split_timeout.cycles = DEFAULT_SPLIT_TIMEOUT;
578 	card->split_timeout.jiffies = isoc_cycles_to_jiffies(DEFAULT_SPLIT_TIMEOUT);
579 	spin_lock_init(&card->split_timeout.lock);
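	/*
	 * With the default of 2 * 8000 cycles this works out to .hi = 2
	 * (seconds) and .lo = 0; the << 19 shift places the remaining cycle
	 * count where the SPLIT_TIMEOUT_LO CSR register carries it.
	 */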
580 
581 	card->color = 0;
582 	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
583 
584 	kref_init(&card->kref);
585 	init_completion(&card->done);
586 
587 	spin_lock_init(&card->lock);
588 
589 	card->local_node = NULL;
590 
591 	INIT_DELAYED_WORK(&card->br_work, br_work);
592 	INIT_DELAYED_WORK(&card->bm_work, bm_work);
593 }
594 EXPORT_SYMBOL(fw_card_initialize);
595 
596 DEFINE_FREE(workqueue_destroy, struct workqueue_struct *, if (_T) destroy_workqueue(_T))
597 
598 int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
599 		unsigned int supported_isoc_contexts)
600 {
601 	struct workqueue_struct *isoc_wq __free(workqueue_destroy) = NULL;
602 	struct workqueue_struct *async_wq __free(workqueue_destroy) = NULL;
603 	int ret;
604 
605 	// This workqueue should be:
606 	//  * != WQ_BH			Sleepable.
607 	//  * == WQ_UNBOUND		Any core can process data for an isoc context; the
608 	//				implementation of a unit protocol could occupy a core
609 	//				for a relatively long time.
610 	//  * != WQ_MEM_RECLAIM		Not used as a backend of any block device.
611 	//  * == WQ_FREEZABLE		Isochronous communication runs at regular intervals in
612 	//				real time, thus should be drained if possible at the freeze phase.
613 	//  * == WQ_HIGHPRI		High priority to process semi-realtime timestamped data.
614 	//  * == WQ_SYSFS		Parameters are available via sysfs.
615 	//  * max_active == n_it + n_ir	A hardIRQ could notify events for multiple isochronous
616 	//				contexts if they are scheduled to the same cycle.
617 	isoc_wq = alloc_workqueue("firewire-isoc-card%u",
618 				  WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
619 				  supported_isoc_contexts, card->index);
620 	if (!isoc_wq)
621 		return -ENOMEM;
622 
623 	// This workqueue should be:
624 	//  * != WQ_BH			Sleepable.
625 	//  * == WQ_UNBOUND		Any core can process data for an asynchronous context.
626 	//  * == WQ_MEM_RECLAIM		Can be used as a backend of a block device.
627 	//  * == WQ_FREEZABLE		The target device would not be available while frozen.
628 	//  * == WQ_HIGHPRI		High priority to process semi-realtime timestamped data.
629 	//  * == WQ_SYSFS		Parameters are available via sysfs.
630 	//  * max_active == 4		A hardIRQ could notify events for a pair of request and
631 	//				response AR/AT contexts.
632 	async_wq = alloc_workqueue("firewire-async-card%u",
633 				   WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
634 				   4, card->index);
635 	if (!async_wq)
636 		return -ENOMEM;
637 
638 	card->isoc_wq = isoc_wq;
639 	card->async_wq = async_wq;
640 	card->max_receive = max_receive;
641 	card->link_speed = link_speed;
642 	card->guid = guid;
643 
644 	scoped_guard(mutex, &card_mutex) {
645 		generate_config_rom(card, tmp_config_rom);
646 		ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
647 		if (ret < 0) {
648 			card->isoc_wq = NULL;
649 			card->async_wq = NULL;
650 			return ret;
651 		}
652 		retain_and_null_ptr(isoc_wq);
653 		retain_and_null_ptr(async_wq);
654 
655 		list_add_tail(&card->link, &card_list);
656 	}
657 
658 	return 0;
659 }
660 EXPORT_SYMBOL(fw_card_add);
661 
662 /*
663  * The next few functions implement a dummy driver that is used once a card
664  * driver shuts down an fw_card.  This allows the driver to cleanly unload,
665  * as all IO to the card will be handled (and failed) by the dummy driver
666  * instead of calling into the module.  Only functions for iso context
667  * shutdown still need to be provided by the card driver.
668  *
669  * .read/write_csr() should never be called anymore after the dummy driver
670  * was bound since they are only used within request handler context.
671  * .set_config_rom() is never called since the card is taken out of card_list
672  * before switching to the dummy driver.
673  */
674 
675 static int dummy_read_phy_reg(struct fw_card *card, int address)
676 {
677 	return -ENODEV;
678 }
679 
680 static int dummy_update_phy_reg(struct fw_card *card, int address,
681 				int clear_bits, int set_bits)
682 {
683 	return -ENODEV;
684 }
685 
686 static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
687 {
688 	packet->callback(packet, card, RCODE_CANCELLED);
689 }
690 
691 static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
692 {
693 	packet->callback(packet, card, RCODE_CANCELLED);
694 }
695 
696 static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
697 {
698 	return -ENOENT;
699 }
700 
701 static int dummy_enable_phys_dma(struct fw_card *card,
702 				 int node_id, int generation)
703 {
704 	return -ENODEV;
705 }
706 
707 static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
708 				int type, int channel, size_t header_size)
709 {
710 	return ERR_PTR(-ENODEV);
711 }
712 
713 static u32 dummy_read_csr(struct fw_card *card, int csr_offset)
714 {
715 	return 0;
716 }
717 
718 static void dummy_write_csr(struct fw_card *card, int csr_offset, u32 value)
719 {
720 }
721 
722 static int dummy_start_iso(struct fw_iso_context *ctx,
723 			   s32 cycle, u32 sync, u32 tags)
724 {
725 	return -ENODEV;
726 }
727 
728 static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
729 {
730 	return -ENODEV;
731 }
732 
733 static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
734 			   struct fw_iso_buffer *buffer, unsigned long payload)
735 {
736 	return -ENODEV;
737 }
738 
739 static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
740 {
741 }
742 
743 static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
744 {
745 	return -ENODEV;
746 }
747 
748 static const struct fw_card_driver dummy_driver_template = {
749 	.read_phy_reg		= dummy_read_phy_reg,
750 	.update_phy_reg		= dummy_update_phy_reg,
751 	.send_request		= dummy_send_request,
752 	.send_response		= dummy_send_response,
753 	.cancel_packet		= dummy_cancel_packet,
754 	.enable_phys_dma	= dummy_enable_phys_dma,
755 	.read_csr		= dummy_read_csr,
756 	.write_csr		= dummy_write_csr,
757 	.allocate_iso_context	= dummy_allocate_iso_context,
758 	.start_iso		= dummy_start_iso,
759 	.set_iso_channels	= dummy_set_iso_channels,
760 	.queue_iso		= dummy_queue_iso,
761 	.flush_queue_iso	= dummy_flush_queue_iso,
762 	.flush_iso_completions	= dummy_flush_iso_completions,
763 };
764 
765 void fw_card_release(struct kref *kref)
766 {
767 	struct fw_card *card = container_of(kref, struct fw_card, kref);
768 
769 	complete(&card->done);
770 }
771 EXPORT_SYMBOL_GPL(fw_card_release);
772 
773 void fw_core_remove_card(struct fw_card *card)
774 {
775 	struct fw_card_driver dummy_driver = dummy_driver_template;
776 
777 	might_sleep();
778 
779 	card->driver->update_phy_reg(card, 4,
780 				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
781 	fw_schedule_bus_reset(card, false, true);
782 
783 	scoped_guard(mutex, &card_mutex)
784 		list_del_init(&card->link);
785 
786 	/* Switch off most of the card driver interface. */
787 	dummy_driver.free_iso_context	= card->driver->free_iso_context;
788 	dummy_driver.stop_iso		= card->driver->stop_iso;
789 	dummy_driver.disable		= card->driver->disable;
790 	card->driver = &dummy_driver;
791 
792 	drain_workqueue(card->isoc_wq);
793 	drain_workqueue(card->async_wq);
794 	card->driver->disable(card);
795 	fw_cancel_pending_transactions(card);
796 
797 	scoped_guard(spinlock_irqsave, &card->lock)
798 		fw_destroy_nodes(card);
799 
800 	/* Wait for all users, especially device workqueue jobs, to finish. */
801 	fw_card_put(card);
802 	wait_for_completion(&card->done);
803 
804 	destroy_workqueue(card->isoc_wq);
805 	destroy_workqueue(card->async_wq);
806 
807 	WARN_ON(!list_empty(&card->transactions.list));
808 }
809 EXPORT_SYMBOL(fw_core_remove_card);
810 
811 /**
812  * fw_card_read_cycle_time: read from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region
813  *			    for controller card.
814  * @card: The instance of card for 1394 OHCI controller.
815  * @cycle_time: The pointer to store the value of cycle time for the read operation.
816  *
817  * Read value from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region for the given
818  * controller card. This function accesses the region without any lock primitives or IRQ mask.
819  * When returning successfully, the content of the @cycle_time argument is aligned to host endianness,
820  * formatted per the CYCLE_TIME CSR register of the IEEE 1394 standard.
821  *
822  * Context: Any context.
823  * Return:
824  * * 0 - Read successfully.
825  * * -ENODEV - The controller is unavailable due to being removed or unbound.
826  */
827 int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time)
828 {
829 	if (card->driver->read_csr == dummy_read_csr)
830 		return -ENODEV;
831 
832 	// It's possible to switch to the dummy driver between the check above and the read below.
833 	// Returning -ENODEV here is thus best-effort.
834 	*cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
835 	return 0;
836 }
837 EXPORT_SYMBOL_GPL(fw_card_read_cycle_time);
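/*
 * The CYCLE_TIME value read above follows the CSR layout defined by IEEE 1394:
 * bits 31..25 count seconds (modulo 128), bits 24..12 count isochronous cycles
 * (0..7999), and bits 11..0 count 24.576 MHz offset ticks within a cycle
 * (0..3071).
 */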
838