// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include "core.h"
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
#include <trace/events/firewire.h>

#define HEADER_DESTINATION_IS_BROADCAST(header) \
	((async_header_get_destination(header) & 0x3f) == 0x3f)

/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
	if (t->is_split_transaction)
		return timer_delete(&t->split_timeout_timer);
	else
		return 1;
}

// card->transactions.lock must be acquired in advance.
static void remove_transaction_entry(struct fw_card *card, struct fw_transaction *entry)
{
	list_del_init(&entry->link);
	card->transactions.tlabel_mask &= ~(1ULL << entry->tlabel);
}

// Must be called without holding card->transactions.lock.
void fw_cancel_pending_transactions(struct fw_card *card)
{
	struct fw_transaction *t, *tmp;
	LIST_HEAD(pending_list);

	// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
	// local destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irqsave, &card->transactions.lock) {
		list_for_each_entry_safe(t, tmp, &card->transactions.list, link) {
			if (try_cancel_split_timeout(t))
				list_move(&t->link, &pending_list);
		}
	}

	list_for_each_entry_safe(t, tmp, &pending_list, link) {
		list_del(&t->link);

		if (!t->with_tstamp) {
			t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0,
						   t->callback_data);
		} else {
			t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp, 0,
						NULL, 0, t->callback_data);
		}
	}
}

// card->transactions.lock must be acquired in advance.
#define find_and_pop_transaction_entry(card, condition)				\
	({									\
		struct fw_transaction *iter, *t = NULL;				\
		list_for_each_entry(iter, &card->transactions.list, link) {	\
			if (condition) {					\
				t = iter;					\
				break;						\
			}							\
		}								\
		if (t && try_cancel_split_timeout(t))				\
			remove_transaction_entry(card, t);			\
		t;								\
	})

static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
			     u32 response_tstamp)
{
	struct fw_transaction *t;

	// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
	// local destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irqsave, &card->transactions.lock) {
		t = find_and_pop_transaction_entry(card, iter == transaction);
		if (!t)
			return -ENOENT;
	}

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
	} else {
		t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0,
					t->callback_data);
	}

	return 0;
}

/*
 * Only valid for transactions that are potentially pending (i.e. have
 * been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	u32 tstamp;

	/*
	 * Cancel the packet transmission if it's still queued. That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */

	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that case.
	 */

	if (transaction->packet.ack == 0) {
		// The timestamp is reused since it has just been read.
		tstamp = transaction->packet.timestamp;
	} else {
		u32 curr_cycle_time = 0;

		(void)fw_card_read_cycle_time(card, &curr_cycle_time);
		tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);
	}

	return close_transaction(transaction, card, RCODE_CANCELLED, tstamp);
}
EXPORT_SYMBOL(fw_cancel_transaction);

static void split_transaction_timeout_callback(struct timer_list *timer)
{
	struct fw_transaction *t = timer_container_of(t, timer, split_timeout_timer);
	struct fw_card *card = t->card;

	scoped_guard(spinlock_irqsave, &card->transactions.lock) {
		if (list_empty(&t->link))
			return;
		remove_transaction_entry(card, t);
	}

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
	} else {
		t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp,
					t->split_timeout_cycle, NULL, 0, t->callback_data);
	}
}

static void start_split_transaction_timeout(struct fw_transaction *t,
					    struct fw_card *card)
{
	unsigned long delta;

	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
		return;

	t->is_split_transaction = true;

	// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
	// local destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
		delta = card->split_timeout.jiffies;
	mod_timer(&t->split_timeout_timer, jiffies + delta);
}

static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
	    container_of(packet, struct fw_transaction, packet);

	trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation,
					      packet->speed, status, packet->timestamp);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE, packet->timestamp);
		break;
	case ACK_PENDING:
	{
		// NOTE: This can be without irqsave when we can guarantee that
		// __fw_send_request() for local destination never runs in any type of
		// IRQ context.
		scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
			t->split_timeout_cycle =
				compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
		}
		start_split_transaction_timeout(t, card);
		break;
	}
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY, packet->timestamp);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR, packet->timestamp);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR, packet->timestamp);
		break;
	default:
		/*
		 * In this case the ack is really a juju specific
		 * rcode, so just forward that to the callback.
		 */
		close_transaction(t, card, status, packet->timestamp);
		break;
	}
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
			    int destination_id, int source_id, int generation, int speed,
			    unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		// The value of the destination_id argument should include the tag, channel,
		// and sy fields, as the isochronous packet header has.
		packet->header[0] = destination_id;
		isoc_header_set_data_length(packet->header, length);
		isoc_header_set_tcode(packet->header, TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;

		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	async_header_set_retry(packet->header, RETRY_X);
	async_header_set_tlabel(packet->header, tlabel);
	async_header_set_tcode(packet->header, tcode);
	async_header_set_destination(packet->header, destination_id);
	async_header_set_source(packet->header, source_id);
	async_header_set_offset(packet->header, offset);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		async_header_set_quadlet_data(packet->header, *(u32 *)payload);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		async_header_set_data_length(packet->header, length);
		async_header_set_extended_tcode(packet->header, ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		async_header_set_data_length(packet->header, length);
		async_header_set_extended_tcode(packet->header, ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->ack = 0;
	packet->payload_mapped = false;
}


static int allocate_tlabel(struct fw_card *card)
	__must_hold(&card->transactions.lock)
{
	int tlabel;

	lockdep_assert_held(&card->transactions.lock);

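	/*
	 * Transaction labels are 6 bits wide, so at most 64 transactions can be
	 * in flight per card. Search the 64-bit mask round-robin, starting right
	 * after the most recently allocated label; give up with -EBUSY once the
	 * search wraps around.
	 */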
	tlabel = card->transactions.current_tlabel;
	while (card->transactions.tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->transactions.current_tlabel)
			return -EBUSY;
	}

	card->transactions.current_tlabel = (tlabel + 1) & 0x3f;
	card->transactions.tlabel_mask |= 1ULL << tlabel;

	return tlabel;
}

/**
 * __fw_send_request() - submit a request packet for transmission, generating a callback for the
 *			 response subaction with or without time stamp.
 * @card: interface to send the request at
 * @t: transaction instance to which the request belongs
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 * @callback: union of two functions, depending on whether the response subaction should be
 *	      delivered with or without time stamp.
 * @with_tstamp: Whether to receive a time stamp for the response subaction.
 * @callback_data: data to be passed to the transaction completion callback
 *
 * Submit a request packet into the asynchronous request transmission queue.
 * Can be called from atomic context. If you prefer a blocking API, use
 * fw_run_transaction() in a context that can sleep.
 *
 * In case of lock requests, specify one of the firewire-core specific %TCODE_
 * constants instead of %TCODE_LOCK_REQUEST in @tcode.
 *
 * Make sure that the value in @destination_id is not older than the one in
 * @generation. Otherwise the request is in danger of being sent to a wrong node.
 *
 * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 * It will contain tag, channel, and sy data instead of a node ID then.
 *
 * The payload buffer at @payload is going to be DMA-mapped except in case of
 * @length <= 8 or of local (loopback) requests. Hence make sure that the
 * buffer complies with the restrictions of the streaming DMA mapping API.
 * @payload must not be freed before the @callback is called.
 *
 * In case of request types without payload, @payload is NULL and @length is 0.
 *
 * After the transaction is completed successfully or unsuccessfully, the
 * @callback will be called. Among its parameters is the response code which
 * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
 * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core
 * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
 * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
 * generation, or missing ACK respectively.
 *
 * Note some timing corner cases: fw_send_request() may complete much earlier
 * than when the request packet actually hits the wire. On the other hand,
 * transaction completion and hence execution of @callback may happen even
 * before fw_send_request() returns.
 */
void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		       int destination_id, int generation, int speed, unsigned long long offset,
		       void *payload, size_t length, union fw_transaction_callback callback,
		       bool with_tstamp, void *callback_data)
{
	int tlabel;

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */

	// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
	// local destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irqsave, &card->transactions.lock)
		tlabel = allocate_tlabel(card);
	if (tlabel < 0) {
		if (!with_tstamp) {
			callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		} else {
			// Timestamping on behalf of hardware.
			u32 curr_cycle_time = 0;
			u32 tstamp;

			(void)fw_card_read_cycle_time(card, &curr_cycle_time);
			tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);

			callback.with_tstamp(card, RCODE_SEND_ERROR, tstamp, tstamp, NULL, 0,
					     callback_data);
		}
		return;
	}

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->card = card;
	t->is_split_transaction = false;
	timer_setup(&t->split_timeout_timer, split_transaction_timeout_callback, 0);
	t->callback = callback;
	t->with_tstamp = with_tstamp;
	t->callback_data = callback_data;
	t->packet.callback = transmit_complete_callback;

	// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
	// local destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irqsave, &card->lock) {
		// The node_id field of fw_card can be updated when handling SelfIDComplete.
		fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id,
				generation, speed, offset, payload, length);
	}

	// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
	// local destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irqsave, &card->transactions.lock)
		list_add_tail(&t->link, &card->transactions.list);

	// Safe with no lock, since the index field of fw_card is immutable once assigned.
	trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
					      t->packet.header, payload,
					      tcode_is_read_request(tcode) ? 0 : length / 4);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL_GPL(__fw_send_request);
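
/*
 * Usage sketch (not compiled): submitting an asynchronous quadlet write with
 * fw_send_request(), the wrapper around __fw_send_request() from
 * <linux/firewire.h> that takes a callback without time stamp. The context
 * structure and its lifetime handling here are assumptions for illustration
 * only.
 */
#if 0
struct example_ctx {
	struct fw_transaction t;
	__be32 data;
};

static void example_write_complete(struct fw_card *card, int rcode,
				   void *payload, size_t length, void *data)
{
	struct example_ctx *ctx = data;

	if (rcode != RCODE_COMPLETE)
		pr_err("write failed: %s\n", fw_rcode_string(rcode));
	kfree(ctx);
}

static int example_quadlet_write(struct fw_device *device, u64 offset, u32 value)
{
	struct example_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);

	if (!ctx)
		return -ENOMEM;
	ctx->data = cpu_to_be32(value);

	// The transaction may complete, and thus free ctx, even before
	// fw_send_request() returns.
	fw_send_request(device->card, &ctx->t, TCODE_WRITE_QUADLET_REQUEST,
			device->node_id, device->generation, device->max_speed,
			offset, &ctx->data, sizeof(ctx->data),
			example_write_complete, ctx);
	return 0;
}
#endif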

struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}

/**
 * fw_run_transaction() - send request and sleep until transaction is completed
 * @card: card interface for this request
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 *
 * Returns the RCODE. See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @payload points to the payload of the request
 * and/or to the payload of the response. DMA mapping restrictions apply to
 * outbound request payloads of >= 8 bytes but not to inbound response payloads.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);
	timer_destroy_on_stack(&t.split_timeout_timer);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);
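
/*
 * Usage sketch (not compiled): a blocking quadlet read via
 * fw_run_transaction(), e.g. of a CSR register. The device and offset are
 * assumptions for illustration; real callers must run in a context that may
 * sleep.
 */
#if 0
static int example_quadlet_read(struct fw_device *device, u64 offset, u32 *value)
{
	__be32 data;
	int rcode;

	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
				   device->node_id, device->generation,
				   device->max_speed, offset, &data, sizeof(data));
	if (rcode != RCODE_COMPLETE)
		return -EIO;

	*value = be32_to_cpu(data);
	return 0;
}
#endif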

static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation,
					  status, packet->timestamp);
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length = 12,
	.payload_length = 0,
	.speed = SCODE_100,
	.callback = transmit_phy_packet_callback,
};

void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = msecs_to_jiffies(100);
	u32 data = 0;

	phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);

	if (node_id != FW_PHY_CONFIG_NO_NODE_ID) {
		phy_packet_phy_config_set_root_id(&data, node_id);
		phy_packet_phy_config_set_force_root_node(&data, true);
	}

	if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
		gap_count = card->driver->read_phy_reg(card, 1);
		if (gap_count < 0)
			return;

		gap_count &= 63;
		if (gap_count == 63)
			return;
	}
	phy_packet_phy_config_set_gap_count(&data, gap_count);
	phy_packet_phy_config_set_gap_count_optimization(&data, true);

	guard(mutex)(&phy_config_mutex);

	async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL);
	phy_config_packet.header[1] = data;
	phy_config_packet.header[2] = ~data;
	phy_config_packet.generation = generation;
	reinit_completion(&phy_config_done);

	trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index,
					  phy_config_packet.generation, phy_config_packet.header[1],
					  phy_config_packet.header[2]);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);
}

static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static bool is_enclosing_handler(struct fw_address_handler *handler,
				 unsigned long long offset, size_t length)
{
	return handler->offset <= offset &&
		offset + length <= handler->offset + handler->length;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (is_enclosing_handler(handler, offset, length))
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

static const struct fw_address_region low_memory_region =
	{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };

#if 0
const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */

static void complete_address_handler(struct kref *kref)
{
	struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);

	complete(&handler->done);
}

static void get_address_handler(struct fw_address_handler *handler)
{
	kref_get(&handler->kref);
}

static int put_address_handler(struct fw_address_handler *handler)
{
	return kref_put(&handler->kref, complete_address_handler);
}

/**
 * fw_core_add_address_handler() - register for incoming requests
 * @handler: callback
 * @region: region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range, the specified callback
 * is invoked. The parameters passed to the callback give the details of the particular request.
 * The callback is invoked in the workqueue context in most cases. However, if the request is
 * initiated by the local node, the callback is invoked in the initiator's context.
 *
 * To be called in process context.
 * Return value: 0 on success, non-zero otherwise.
 *
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 *
 * Address allocations are exclusive, except for the FCP registers.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    region->end   > 0x0001000000000000ULL ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	guard(spinlock)(&address_handler_list_lock);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		if (is_in_fcp_region(handler->offset, handler->length))
			other = NULL;
		else
			other = lookup_overlapping_address_handler
					(&address_handler_list,
					 handler->offset, handler->length);
		if (other != NULL) {
			handler->offset += other->length;
		} else {
			init_completion(&handler->done);
			kref_init(&handler->kref);
			list_add_tail_rcu(&handler->link, &address_handler_list);
			ret = 0;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
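
/*
 * Usage sketch (not compiled): registering a handler for incoming requests.
 * The core picks a free start offset inside the region and returns it in
 * handler->offset. The region choice, length, and names are illustrative
 * assumptions only; example_address_callback is sketched after
 * fw_send_response() below.
 */
#if 0
static struct fw_address_handler example_handler = {
	.length = 0x100,
	.address_callback = example_address_callback,
};

static int example_register(void)
{
	int ret = fw_core_add_address_handler(&example_handler, &fw_high_memory_region);

	if (ret < 0)
		return ret;
	pr_info("handling requests at 0x%012llx\n", example_handler.offset);
	return 0;
}
#endif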

/**
 * fw_core_remove_address_handler() - unregister an address handler
 * @handler: callback
 *
 * To be called in process context.
 *
 * When fw_core_remove_address_handler() returns, @handler->callback() is
 * guaranteed to not run on any CPU anymore.
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	scoped_guard(spinlock, &address_handler_list_lock)
		list_del_rcu(&handler->link);

	synchronize_rcu();

	if (!put_address_handler(handler))
		wait_for_completion(&handler->done);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

struct fw_request {
	struct kref kref;
	struct fw_packet response;
	u32 request_header[ASYNC_HEADER_QUADLET_COUNT];
	int ack;
	u32 timestamp;
	u32 length;
	u32 data[];
};

void fw_request_get(struct fw_request *request)
{
	kref_get(&request->kref);
}

static void release_request(struct kref *kref)
{
	struct fw_request *request = container_of(kref, struct fw_request, kref);

	kfree(request);
}

void fw_request_put(struct fw_request *request)
{
	kref_put(&request->kref, release_request);
}

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request = container_of(packet, struct fw_request, response);

	trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,
					       packet->speed, status, packet->timestamp);

	// Decrease the reference count since the response is no longer in-flight.
	fw_request_put(request);

	// Decrease the reference count to release the object.
	fw_request_put(request);
}

int fw_get_response_length(struct fw_request *r)
{
	int tcode, ext_tcode, data_length;

	tcode = async_header_get_tcode(r->request_header);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return 0;

	case TCODE_READ_QUADLET_REQUEST:
		return 4;

	case TCODE_READ_BLOCK_REQUEST:
		data_length = async_header_get_data_length(r->request_header);
		return data_length;

	case TCODE_LOCK_REQUEST:
		ext_tcode = async_header_get_extended_tcode(r->request_header);
		data_length = async_header_get_data_length(r->request_header);
		switch (ext_tcode) {
		case EXTCODE_FETCH_ADD:
		case EXTCODE_LITTLE_ADD:
			return data_length;
		default:
			return data_length / 2;
		}

	default:
		WARN(1, "wrong tcode %d\n", tcode);
		return 0;
	}
}

void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	tcode = async_header_get_tcode(request_header);
	tlabel = async_header_get_tlabel(request_header);
	source = async_header_get_destination(request_header);	// Exchange.
	destination = async_header_get_source(request_header);	// Exchange.
	extended_tcode = async_header_get_extended_tcode(request_header);

	async_header_set_retry(response->header, RETRY_1);
	async_header_set_tlabel(response->header, tlabel);
	async_header_set_destination(response->header, destination);
	async_header_set_source(response->header, source);
	async_header_set_rcode(response->header, rcode);
	response->header[2] = 0;	// The field is reserved.

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		async_header_set_tcode(response->header, TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		async_header_set_tcode(response->header, TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			async_header_set_quadlet_data(response->header, *(u32 *)payload);
		else
			async_header_set_quadlet_data(response->header, 0);
		response->header_length = 16;
		response->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		async_header_set_tcode(response->header, tcode + 2);
		async_header_set_data_length(response->header, length);
		async_header_set_extended_tcode(response->header, extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}

	response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);

static u32 compute_split_timeout_timestamp(struct fw_card *card,
					   u32 request_timestamp)
	__must_hold(&card->split_timeout.lock)
{
	unsigned int cycles;
	u32 timestamp;

	lockdep_assert_held(&card->split_timeout.lock);

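	// The OHCI timestamp holds the isochronous cycle count in its low 13 bits
	// (0..7999) and the three low-order bits of the seconds field above them.
	// Add the timeout offset to the cycle count, carrying into the seconds part.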
	cycles = card->split_timeout.cycles;
	cycles += request_timestamp & 0x1fff;

	timestamp = request_timestamp & ~0x1fff;
	timestamp += (cycles / 8000) << 13;
	timestamp |= cycles % 8000;

	return timestamp;
}

static struct fw_request *allocate_request(struct fw_card *card,
					   struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode;

	request_tcode = async_header_get_tcode(p->header);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = async_header_get_data_length(p->header);
		break;

	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = async_header_get_data_length(p->header);
		break;

	default:
		fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
			  p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;
	kref_init(&request->kref);

	// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
	// local destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
		request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp);

	request->response.speed = p->speed;
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->timestamp = p->timestamp;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}

/**
 * fw_send_response() - send response packet for asynchronous transaction.
 * @card: interface to send the response at.
 * @request: firewire request data for the transaction.
 * @rcode: response code to send.
 *
 * Submit a response packet into the asynchronous response transmission queue. The @request
 * is going to be released when the transmission successfully finishes later.
 */
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	u32 *data = NULL;
	unsigned int data_length = 0;

	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header)) {
		fw_request_put(request);
		return;
	}

	if (rcode == RCODE_COMPLETE) {
		data = request->data;
		data_length = fw_get_response_length(request);
	}

	fw_fill_response(&request->response, request->request_header, rcode, data, data_length);

	// Increase the reference count so that the object is kept alive while in-flight.
	fw_request_get(request);

	trace_async_response_outbound_initiate((uintptr_t)request, card->index,
					       request->response.generation, request->response.speed,
					       request->response.header, data,
					       data ? data_length / 4 : 0);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);
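
/*
 * Usage sketch (not compiled): an address_callback for the handler registered
 * in the sketch after fw_core_add_address_handler(). It accepts quadlet reads
 * only and answers everything else with an error rcode; the names and the
 * dummy value are illustrative assumptions only.
 */
#if 0
static void example_address_callback(struct fw_card *card, struct fw_request *request,
				     int tcode, int destination, int source,
				     int generation, unsigned long long offset,
				     void *payload, size_t length, void *callback_data)
{
	if (tcode != TCODE_READ_QUADLET_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	// For read requests, payload is the buffer for the response data.
	*(__be32 *)payload = cpu_to_be32(0xdeadbeef);
	fw_send_response(card, request, RCODE_COMPLETE);
}
#endif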

/**
 * fw_get_request_speed() - returns speed at which the @request was received
 * @request: firewire request data
 */
int fw_get_request_speed(struct fw_request *request)
{
	return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);

/**
 * fw_request_get_timestamp() - Get timestamp of the request.
 * @request: The opaque pointer to request structure.
 *
 * Get timestamp when 1394 OHCI controller receives the asynchronous request subaction. The
 * timestamp consists of the low order 3 bits of second field and the full 13 bits of count
 * field of isochronous cycle time register.
 *
 * Returns: timestamp of the request.
 */
u32 fw_request_get_timestamp(const struct fw_request *request)
{
	return request->timestamp;
}
EXPORT_SYMBOL_GPL(fw_request_get_timestamp);
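
/*
 * Worked sketch (not compiled): splitting the 16-bit OHCI timestamp returned
 * by fw_request_get_timestamp() into its seconds and cycle parts.
 */
#if 0
static void example_decode_tstamp(const struct fw_request *request)
{
	u32 tstamp = fw_request_get_timestamp(request);
	unsigned int sec = (tstamp >> 13) & 0x07;	// Low 3 bits of cycle seconds.
	unsigned int cycle = tstamp & 0x1fff;		// Cycle count, 0..7999.

	pr_info("received at %u s + %u cycles\n", sec, cycle);
}
#endif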

static void handle_exclusive_region_request(struct fw_card *card,
					    struct fw_packet *p,
					    struct fw_request *request,
					    unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	destination = async_header_get_destination(p->header);
	source = async_header_get_source(p->header);
	tcode = async_header_get_tcode(p->header);
	if (tcode == TCODE_LOCK_REQUEST)
		tcode = 0x10 + async_header_get_extended_tcode(p->header);

	scoped_guard(rcu) {
		handler = lookup_enclosing_address_handler(&address_handler_list, offset,
							   request->length);
		if (handler)
			get_address_handler(handler);
	}

	if (!handler) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	// Outside the RCU read-side critical section. Without spinlock. With reference count.
	handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
				  request->data, request->length, handler->callback_data);
	put_address_handler(handler);
}

// To use the kmalloc allocator efficiently, this should be a power of two.
#define BUFFER_ON_KERNEL_STACK_SIZE 4

static void handle_fcp_region_request(struct fw_card *card,
				      struct fw_packet *p,
				      struct fw_request *request,
				      unsigned long long offset)
{
	struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
	struct fw_address_handler *handler, **handlers;
	int tcode, destination, source, i, count, buffer_size;

	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
	    request->length > 0x200) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);

		return;
	}

	tcode = async_header_get_tcode(p->header);
	destination = async_header_get_destination(p->header);
	source = async_header_get_source(p->header);

	if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
	    tcode != TCODE_WRITE_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);

		return;
	}

	count = 0;
	handlers = buffer_on_kernel_stack;
	buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
	scoped_guard(rcu) {
		list_for_each_entry_rcu(handler, &address_handler_list, link) {
			if (is_enclosing_handler(handler, offset, request->length)) {
				if (count >= buffer_size) {
					int next_size = buffer_size * 2;
					struct fw_address_handler **buffer_on_kernel_heap;

					if (handlers == buffer_on_kernel_stack)
						buffer_on_kernel_heap = NULL;
					else
						buffer_on_kernel_heap = handlers;

					buffer_on_kernel_heap =
						krealloc_array(buffer_on_kernel_heap, next_size,
							       sizeof(*buffer_on_kernel_heap),
							       GFP_ATOMIC);
					// FCP is used for purposes unrelated to significant system
					// resources (e.g. storage or networking), so allocation
					// failures are not considered critical.
					if (!buffer_on_kernel_heap)
						break;

					if (handlers == buffer_on_kernel_stack) {
						memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
						       sizeof(buffer_on_kernel_stack));
					}

					handlers = buffer_on_kernel_heap;
					buffer_size = next_size;
				}
				get_address_handler(handler);
				handlers[count++] = handler;
			}
		}
	}

	for (i = 0; i < count; ++i) {
		handler = handlers[i];
		handler->address_callback(card, request, tcode, destination, source,
					  p->generation, offset, request->data,
					  request->length, handler->callback_data);
		put_address_handler(handler);
	}

	if (handlers != buffer_on_kernel_stack)
		kfree(handlers);

	fw_send_response(card, request, RCODE_COMPLETE);
}

void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_request *request;
	unsigned long long offset;
	unsigned int tcode;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	tcode = async_header_get_tcode(p->header);
	if (tcode_is_link_internal(tcode)) {
		trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack,
					p->timestamp, p->header[1], p->header[2]);
		fw_cdev_handle_phy_packet(card, p);
		return;
	}

	request = allocate_request(card, p);
	if (request == NULL) {
		/* FIXME: send statically allocated busy packet. */
		return;
	}

	trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed,
				    p->ack, p->timestamp, p->header, request->data,
				    tcode_is_read_request(tcode) ? 0 : request->length / 4);

	offset = async_header_get_offset(p->header);

	if (!is_in_fcp_region(offset, request->length))
		handle_exclusive_region_request(card, p, request, offset);
	else
		handle_fcp_region_request(card, p, request, offset);
}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t = NULL;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, source, rcode;

	tcode = async_header_get_tcode(p->header);
	tlabel = async_header_get_tlabel(p->header);
	source = async_header_get_source(p->header);
	rcode = async_header_get_rcode(p->header);

	// FIXME: sanity check the packet: is the length correct, and do the tcode and
	// addresses match the transaction request queried later?
	//
	// For the tracepoints event, let us decode the header here against the concern.

	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;

	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;

	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = async_header_get_data_length(p->header);
		break;

	default:
		/* Should never happen, this is just to shut up gcc. */
		data = NULL;
		data_length = 0;
		break;
	}

	// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
	// local destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irqsave, &card->transactions.lock) {
		t = find_and_pop_transaction_entry(card,
						   iter->node_id == source && iter->tlabel == tlabel);
	}

	trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
				     p->timestamp, p->header, data, data_length / 4);

	if (!t) {
		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * The response handler may be executed while the request handler
	 * is still pending. Cancel the request handler.
	 */
	card->driver->cancel_packet(card, &t->packet);

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, rcode, data, data_length, t->callback_data);
	} else {
		t->callback.with_tstamp(card, rcode, t->packet.timestamp, p->timestamp, data,
					data_length, t->callback_data);
	}
}
EXPORT_SYMBOL(fw_core_handle_response);

/**
 * fw_rcode_string - convert a firewire result code to an error description
 * @rcode: the result code
 */
const char *fw_rcode_string(int rcode)
{
	static const char *const names[] = {
		[RCODE_COMPLETE]       = "no error",
		[RCODE_CONFLICT_ERROR] = "conflict error",
		[RCODE_DATA_ERROR]     = "data error",
		[RCODE_TYPE_ERROR]     = "type error",
		[RCODE_ADDRESS_ERROR]  = "address error",
		[RCODE_SEND_ERROR]     = "send error",
		[RCODE_CANCELLED]      = "timeout",
		[RCODE_BUSY]           = "busy",
		[RCODE_GENERATION]     = "bus reset",
		[RCODE_NO_ACK]         = "no ack",
	};

	if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
		return names[rcode];
	else
		return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);

static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
				int tcode, int destination, int source, int generation,
				unsigned long long offset, void *payload, size_t length,
				void *callback_data)
{
	int start;

	if (!tcode_is_read_request(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;

	// NOTE: This can be without irqsave when we can guarantee that fw_send_request() for local
	// destination never runs in any type of IRQ context.
	scoped_guard(spinlock_irqsave, &card->topology_map.lock)
		memcpy(payload, &card->topology_map.buffer[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length = 0x400,
	.address_callback = handle_topology_map,
};

static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void update_split_timeout(struct fw_card *card)
	__must_hold(&card->split_timeout.lock)
{
	unsigned int cycles;

	cycles = card->split_timeout.hi * 8000 + (card->split_timeout.lo >> 19);

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	cycles = clamp(cycles, 800u, 3u * 8000u);

	card->split_timeout.cycles = cycles;
	card->split_timeout.jiffies = isoc_cycles_to_jiffies(cycles);
}

static void handle_registers(struct fw_card *card, struct fw_request *request,
			     int tcode, int destination, int source, int generation,
			     unsigned long long offset, void *payload, size_t length,
			     void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;

	switch (reg) {
	case CSR_PRIORITY_BUDGET:
		if (!card->priority_budget_implemented) {
			rcode = RCODE_ADDRESS_ERROR;
			break;
		}
		fallthrough;

	case CSR_NODE_IDS:
		/*
		 * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
		 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
		 */
		fallthrough;

	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
	case CSR_BUSY_TIMEOUT:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->driver->read_csr(card, reg));
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, reg, be32_to_cpu(*data));
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_RESET_START:
		if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, CSR_STATE_CLEAR,
						CSR_STATE_BIT_ABDICATE);
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_SPLIT_TIMEOUT_HI:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout.hi);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			// NOTE: This can be without irqsave when we can guarantee that
			// __fw_send_request() for local destination never runs in any type of IRQ
			// context.
			scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
				card->split_timeout.hi = be32_to_cpu(*data) & 7;
				update_split_timeout(card);
			}
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_SPLIT_TIMEOUT_LO:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout.lo);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			// NOTE: This can be without irqsave when we can guarantee that
			// __fw_send_request() for local destination never runs in any type of IRQ
			// context.
			scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
				card->split_timeout.lo = be32_to_cpu(*data) & 0xfff80000;
				update_split_timeout(card);
			}
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_MAINT_UTILITY:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = card->maint_utility_register;
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->maint_utility_register = *data;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * FIXME: these are handled by the OHCI hardware and
		 * the stack never sees these requests. If we add
		 * support for a new type of controller that doesn't
		 * handle this in hardware we need to deal with these
		 * transactions.
		 */
		BUG();
		break;

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
	.length = 0x400,
	.address_callback = handle_registers,
};

static void handle_low_memory(struct fw_card *card, struct fw_request *request,
			      int tcode, int destination, int source, int generation,
			      unsigned long long offset, void *payload, size_t length,
			      void *callback_data)
{
	/*
	 * This catches requests not handled by the physical DMA unit,
	 * i.e., wrong transaction types or unauthorized source nodes.
	 */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler low_memory = {
	.length = FW_MAX_PHYSICAL_RANGE,
	.address_callback = handle_low_memory,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
	/* textual descriptor leaf () */
	0x00060000,
	0x00000000,
	0x00000000,
	0x4c696e75,		/* L i n u */
	0x78204669,		/* x   F i */
	0x72657769,		/* r e w i */
	0x72650000,		/* r e     */
};

static const u32 model_textual_descriptor[] = {
	/* model descriptor leaf () */
	0x00030000,
	0x00000000,
	0x00000000,
	0x4a756a75,		/* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
	.length = ARRAY_SIZE(vendor_textual_descriptor),
	.immediate = 0x03001f11,
	.key = 0x81000000,
	.data = vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
	.length = ARRAY_SIZE(model_textual_descriptor),
	.immediate = 0x17023901,
	.key = 0x81000000,
	.data = model_textual_descriptor,
};

static int __init fw_core_init(void)
{
	int ret;

	fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM | WQ_UNBOUND,
				       0);
	if (!fw_workqueue)
		return -ENOMEM;

	ret = bus_register(&fw_bus_type);
	if (ret < 0) {
		destroy_workqueue(fw_workqueue);
		return ret;
	}

	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
	if (fw_cdev_major < 0) {
		bus_unregister(&fw_bus_type);
		destroy_workqueue(fw_workqueue);
		return fw_cdev_major;
	}

	fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
	fw_core_add_address_handler(&low_memory, &low_memory_region);
	fw_core_add_descriptor(&vendor_id_descriptor);
	fw_core_add_descriptor(&model_id_descriptor);

	return 0;
}

static void __exit fw_core_cleanup(void)
{
	unregister_chrdev(fw_cdev_major, "firewire");
	bus_unregister(&fw_bus_type);
	destroy_workqueue(fw_workqueue);
	xa_destroy(&fw_device_xa);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);