// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/page.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"

#include <trace/events/firewire.h>

static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);

#define CREATE_TRACE_POINTS
#include <trace/events/firewire_ohci.h>

#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

#define DESCRIPTOR_CMD			(0xf << 12)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __aligned(16);

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	MAX(2, AR_BUFFERS_MIN)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)

struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	dma_addr_t dma_addrs[AR_BUFFERS];
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct work_struct work;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	u32 current_bus;
	bool running;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor block in the DMA program. It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;
	int prev_z;

	descriptor_callback_t callback;
};

struct at_context {
	struct context context;
	struct work_struct work;
	bool flushing;
};

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	unsigned long flushing_completions;
	u8 sync;
	u8 tags;
	union {
		struct {
			u16 last_timestamp;
			size_t header_length;
			void *header;
		} sc;
		struct {
			u32 buffer_bus;
			u16 completed;
		} mc;
	};
};

#define CONFIG_ROM_SIZE		(CSR_CONFIG_ROM_END - CSR_CONFIG_ROM)

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool bus_time_running;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data. Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct at_context at_request_ctx;
	struct at_context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;	/* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels;	/* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;	/* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels;	/* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id;
	dma_addr_t self_id_bus;

	u32 self_id_buffer[512];
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_DEVICE_ID_VIA_VT630X	0x3044
#define PCI_REV_ID_VIA_VT6306		0x46
#define PCI_DEVICE_ID_VIA_VT6315	0x3403

#define QUIRK_CYCLE_TIMER		0x1
#define QUIRK_RESET_PACKET		0x2
#define QUIRK_BE_HEADERS		0x4
#define QUIRK_NO_1394A			0x8
#define QUIRK_NO_MSI			0x10
#define QUIRK_TI_SLLZ059		0x20
#define QUIRK_IR_WAKE			0x40

// On the PCI Express Root Complex of any type of AMD Ryzen machine, a VIA VT6306/6307/6308 behind
// an Asmedia ASM1083/1085 has the inconvenient property that read accesses to the 'Isochronous
// Cycle Timer' register (at offset 0xf0 in PCI I/O space) often cause an unexpected system reboot.
// The mechanism is not clear, since read access to the other registers (e.g. the 'Node ID'
// register) is safe enough; it is probably due to the detection of some type of PCIe error.
#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ	0x80000000

#if IS_ENABLED(CONFIG_X86)

static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
}

#define PCI_DEVICE_ID_ASMEDIA_ASM108X	0x1080

static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
{
	const struct pci_dev *pcie_to_pci_bridge;

	// Detect any type of AMD Ryzen machine.
	if (!static_cpu_has(X86_FEATURE_ZEN))
		return false;

	// Detect VIA VT6306/6307/6308.
	if (pdev->vendor != PCI_VENDOR_ID_VIA)
		return false;
	if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
		return false;

	// Detect Asmedia ASM1083/1085.
	pcie_to_pci_bridge = pdev->bus->self;
	if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
		return false;
	if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
		return false;

	return true;
}

#else
#define has_reboot_by_cycle_timer_read_quirk(ohci) false
#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
#endif

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
	")");

static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

/*
 * Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly. Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting. Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);
	dump_stack();

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
	dump_stack();

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);

	guard(mutex)(&ohci->phy_reg_mutex);

	return read_phy_reg(ohci, addr);
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);

	guard(mutex)(&ohci->phy_reg_mutex);

	return update_phy_reg(ohci, addr, clear_bits, set_bits);
}

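/*
 * Re-arm the page at @index so that the controller can fill it again, link
 * it behind the most recently linked descriptor of the ring, and wake the
 * context so that the controller re-evaluates the branch address.
 */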
static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static void ar_context_release(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;

	if (!ctx->buffer)
		return;

	for (int i = 0; i < AR_BUFFERS; ++i) {
		dma_addr_t dma_addr = ctx->dma_addrs[i];
		if (dma_addr)
			dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
	}
	memset(ctx->dma_addrs, 0, sizeof(ctx->dma_addrs));

	vunmap(ctx->buffer);
	ctx->buffer = NULL;

	release_pages(ctx->pages, AR_BUFFERS);
	memset(ctx->pages, 0, sizeof(ctx->pages));
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	struct fw_ohci *ohci = ctx->ohci;

	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ohci);

		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

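/*
 * The AR buffer pages form a ring; the page after the most recently linked
 * one is the first that may still hold unprocessed data.
 */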
static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = READ_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might never be updated by the
			 * controller and still look empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device, ctx->dma_addrs[i], PAGE_SIZE,
					DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device, ctx->dma_addrs[i],
					end_buffer_offset, DMA_FROM_DEVICE);
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk)
{
	return has_be_header_quirk ? (__force __u32)value : le32_to_cpu(value);
}

static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_BE_HEADERS);
}
#else
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused)
{
	return le32_to_cpu(value);
}

static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
	return false;
}
#endif

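/*
 * Parse one asynchronous packet at @buffer: decode the header quadlets
 * according to the transaction code, pick up the trailing status quadlet
 * written by the controller, and hand the packet to the core as a request
 * or a response. Returns a pointer past the packet and its status, or NULL
 * if the buffer contents were invalid and the context was aborted.
 */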
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci));
	p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci));
	p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci));

	tcode = async_header_get_tcode(p.header);
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
		p.header_length = 16;
		p.payload_length = async_header_get_data_length(p.header);
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_LINK_INTERNAL:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci));
	evt = (status >> 16) & 0x1f;

	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL)
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3). This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation. We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation. We set the correct generation for these
	 * at a slightly incorrect time (in handle_selfid_complete_event).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}

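/*
 * Hand all fully handled pages up to (but not including) @end_buffer back
 * to the controller.
 */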
static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device, ctx->dma_addrs[i], PAGE_SIZE,
					   DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}

static void ohci_ar_context_work(struct work_struct *work)
{
	struct ar_context *ctx = from_work(ctx, work, work);
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx, &end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		// The filled part of the overall buffer wraps around; handle all packets up to the
		// buffer end here. If the last packet wraps around, its tail will be visible after
		// the buffer end because the buffer start pages are mapped there again.
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		// adjust p to point back into the actual buffer
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;
error:
	ctx->pointer = NULL;
}

static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	struct device *dev = ohci->card.device;
	unsigned int i;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES] = { NULL };
	dma_addr_t dma_addrs[AR_BUFFERS];
	void *vaddr;
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	INIT_WORK(&ctx->work, ohci_ar_context_work);

	// Retrieve noncontiguous pages. Each descriptor of a 1394 OHCI AR DMA context carries its
	// own address and length. Pages are used so that a contiguous address range can be
	// constructed in kernel virtual address space.
	unsigned long nr_populated = alloc_pages_bulk(GFP_KERNEL | GFP_DMA32, AR_BUFFERS, pages);

	if (nr_populated != AR_BUFFERS) {
		release_pages(pages, nr_populated);
		return -ENOMEM;
	}

	// Map the pages into contiguous kernel virtual addresses so that the packet data across
	// the pages can be referenced as contiguous, especially across the last and first pages.
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = pages[i];
	vaddr = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (!vaddr) {
		release_pages(pages, nr_populated);
		return -ENOMEM;
	}

	// Retrieve DMA mapping addresses for the pages. They are not contiguous. Maintain the
	// cache coherency for the pages by hand.
	for (i = 0; i < AR_BUFFERS; i++) {
		// dma_map_phys() with a physical address per page could be used here instead.
		dma_addr_t dma_addr = dma_map_page(dev, pages[i], 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr))
			break;
		dma_addrs[i] = dma_addr;
		dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
	}
	if (i < AR_BUFFERS) {
		while (i-- > 0)
			dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE, DMA_FROM_DEVICE);
		vunmap(vaddr);
		release_pages(pages, nr_populated);
		return -ENOMEM;
	}

	memcpy(ctx->dma_addrs, dma_addrs, sizeof(ctx->dma_addrs));
	ctx->buffer = vaddr;
	memcpy(ctx->pages, pages, sizeof(ctx->pages));

	ctx->descriptors = ohci->misc_buffer + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count = cpu_to_le16(PAGE_SIZE);
		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_STATUS |
					 DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address = cpu_to_le32(ctx->dma_addrs[i]);
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
						ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;
}

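/*
 * Link all pages into the ring, point the context at the first descriptor
 * (with Z=1), and start the DMA engine.
 */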
static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
}

static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	__le16 branch;

	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return d;
	else
		return d + z - 1;
}

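/*
 * Walk the DMA program from ctx->last, following branch addresses that the
 * controller has already consumed, and run the context callback for each
 * completed descriptor block. Descriptor buffers that have been fully
 * processed are moved back to the free tail of buffer_list.
 */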
static void context_retire_descriptors(struct context *ctx)
{
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;
		ctx->current_bus = address;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			// If we've advanced to the next buffer, move the previous buffer to the
			// free list.
			old_desc->used = 0;
			guard(spinlock_irqsave)(&ctx->ohci->lock);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
		}
		ctx->last = last;
	}
}

static void ohci_at_context_work(struct work_struct *work)
{
	struct at_context *ctx = from_work(ctx, work, work);

	context_retire_descriptors(&ctx->context);
}

static void ohci_isoc_context_work(struct work_struct *work)
{
	struct fw_iso_context *base = from_work(base, work, work);
	struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);

	context_retire_descriptors(&isoc_ctx->context);
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context. Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t bus_addr;
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program. This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	/*
	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
	 * for descriptors, even 0x10-byte ones. This can cause page faults when
	 * an IOMMU is in use and the oversized read crosses a page boundary.
	 * Work around this by always leaving at least 0x10 bytes of padding.
	 */
	desc->buffer_size = PAGE_SIZE - offset - 0x10;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent. That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;
	ctx->prev_z = 1;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) {
		dmam_free_coherent(card->device, PAGE_SIZE, desc,
				   desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
	}
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

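/*
 * Point the context at the first descriptor block of the DMA program and
 * start it. @extra carries context-specific control bits such as
 * IT_CONTEXT_CYCLE_MATCH_ENABLE.
 */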
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}

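/*
 * Append a block of @z descriptors (plus @extra padding descriptors) to the
 * DMA program by patching the branch address of the previously appended
 * block.
 */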
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;
	struct descriptor *d_branch;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */

	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
	d_branch->branch_address = cpu_to_le32(d_bus | z);

	/*
	 * VT6306 incorrectly checks only the single descriptor at the
	 * CommandPtr when the wake bit is written, so if it's a
	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
	 * the branch address in the first descriptor.
	 *
	 * This is not done for transmit contexts since it is not clear how it
	 * interacts with skip addresses.
	 */
	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
	    d_branch != ctx->prev &&
	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	}

	ctx->prev = d;
	ctx->prev_z = z;
}

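/*
 * Clear the run bit and busy-wait for up to ~10 ms for the context to
 * actually become inactive.
 */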
static void context_stop(struct context *ctx)
{
	struct fw_ohci *ohci = ctx->ohci;
	u32 reg;
	int i;

	reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	ctx->running = false;

	for (i = 0; i < 1000; i++) {
		reg = reg_read(ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		if (i)
			udelay(10);
	}
	ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
}

struct driver_data {
	u8 inline_data[8];
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct at_context *ctx, struct fw_packet *packet)
{
	struct context *context = &ctx->context;
	struct fw_ohci *ohci = context->ohci;
	dma_addr_t d_bus, payload_bus;
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;

	d = context_get_descriptors(context, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	tcode = async_header_get_tcode(packet->header);
	header = (__le32 *) &d[1];
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		ohci1394_at_data_set_src_bus_id(header, false);
		ohci1394_at_data_set_speed(header, packet->speed);
		ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header));
		ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header));
		ohci1394_at_data_set_tcode(header, tcode);

		ohci1394_at_data_set_destination_id(header,
						    async_header_get_destination(packet->header));

		if (ctx == &ohci->at_response_ctx) {
			ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
		} else {
			ohci1394_at_data_set_destination_offset(header,
						async_header_get_offset(packet->header));
		}

		if (tcode_is_block_packet(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;
	case TCODE_LINK_INTERNAL:
		ohci1394_at_data_set_speed(header, packet->speed);
		ohci1394_at_data_set_tcode(header, TCODE_LINK_INTERNAL);

		header[1] = cpu_to_le32(packet->header[1]);
		header[2] = cpu_to_le32(packet->header[2]);
		d[0].req_count = cpu_to_le16(12);

		if (is_ping_packet(&packet->header[1]))
			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
		break;

	case TCODE_STREAM_DATA:
		ohci1394_it_data_set_speed(header, packet->speed);
		ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0]));
		ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0]));
		ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
		ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0]));

		ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0]));

		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		if (packet->payload_length > sizeof(driver_data->inline_data)) {
			payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(ohci->card.device, payload_bus)) {
				packet->ack = RCODE_SEND_ERROR;
				return -1;
			}
			packet->payload_bus = payload_bus;
			packet->payload_mapped = true;
		} else {
			memcpy(driver_data->inline_data, packet->payload,
			       packet->payload_length);
			payload_bus = d_bus + 3 * sizeof(*d);
		}

		d[2].req_count = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(context, d, z, 4 - z);

	if (context->running)
		reg_write(ohci, CONTROL_SET(context->regs), CONTEXT_WAKE);
	else
		context_run(context, 0);

	return 0;
}

static void at_context_flush(struct at_context *ctx)
{
	// Avoid deadlock due to programming mistake.
	if (WARN_ON_ONCE(current_work() == &ctx->work))
		return;

	disable_work_sync(&ctx->work);

	WRITE_ONCE(ctx->flushing, true);
	ohci_at_context_work(&ctx->work);
	WRITE_ONCE(ctx->flushing, false);

	enable_work(&ctx->work);
}

static int find_fw_device(struct device *dev, const void *data)
{
	struct fw_device *device = fw_device(dev);
	const u32 *params = data;

	return (device->generation == params[0]) && (device->node_id == params[1]);
}

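/*
 * Callback for the AT contexts: translate the transfer status of a
 * completed (or flushed) descriptor block into an ack or rcode for the
 * queued packet and complete it.
 */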
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct at_context *ctx = container_of(context, struct at_context, context);
	struct fw_ohci *ohci = ctx->context.ohci;
	struct driver_data *driver_data;
	struct fw_packet *packet;
	int evt;

	if (last->transfer_status == 0 && !READ_ONCE(ctx->flushing))
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A packet that was flushed should give the same error as
		 * when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (READ_ONCE(ctx->flushing))
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (READ_ONCE(ctx->flushing)) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		fallthrough;

	default:
		if (unlikely(evt == 0x10)) {
			u32 params[2] = {
				packet->generation,
				async_header_get_destination(packet->header),
			};
			struct device *dev;

			fw_card_get(&ohci->card);
			dev = device_find_child(ohci->card.device, (const void *)params, find_fw_device);
			fw_card_put(&ohci->card);
			if (dev) {
				struct fw_device *device = fw_device(dev);
				int quirks = READ_ONCE(device->quirks);

				put_device(dev);
				if (quirks & FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE) {
					packet->ack = ACK_PENDING;
					break;
				}
			}
		}
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

static u32 get_cycle_time(struct fw_ohci *ohci);

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = async_header_get_tcode(packet->header);
	if (tcode_is_block_packet(tcode))
		length = async_header_get_data_length(packet->header);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!tcode_is_read_request(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = async_header_get_tcode(packet->header);
	length = async_header_get_data_length(packet->header);
	payload = packet->payload;
	ext_tcode = async_header_get_extended_tcode(packet->header);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->context.ohci;
	u64 offset, csr;

	if (ctx == &ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ohci->card, packet->ack);
	}

	offset = async_header_get_offset(packet->header);
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ohci, packet, csr);
		break;
	default:
		if (ctx == &ohci->at_request_ctx)
			fw_core_handle_request(&ohci->card, packet);
		else
			fw_core_handle_response(&ohci->card, packet);
		break;
	}

	if (ctx == &ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ohci->card, packet->ack);
	}
}

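/*
 * Queue a packet for transmission, or short-circuit it through the local
 * request handlers when it is addressed to the local node in the current
 * generation.
 */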
static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->context.ohci;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ohci->lock, flags);

	if (async_header_get_destination(packet->header) == ohci->node_id &&
	    ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ohci->lock, flags);

		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));

		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (ret < 0) {
		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));

		packet->callback(packet, &ohci->card, packet->ack);
	}
}

static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	static const char *const evts[] = {
		[0x00] = "evt_no_status",	[0x01] = "-reserved-",
		[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
		[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
		[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
		[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
		[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
		[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
		[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
		[0x10] = "-reserved-",		[0x11] = "ack_complete",
		[0x12] = "ack_pending ",	[0x13] = "-reserved-",
		[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
		[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
		[0x18] = "-reserved-",		[0x19] = "-reserved-",
		[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
		[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
		[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
		[0x20] = "pending/cancelled",
	};
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD)
		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
			 name, evts[ctl & 0x1f]);
}

static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}

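/*
 * Convert a value of the isochronous cycle timer register (7 bits of
 * cycleSeconds, 13 bits of cycleCount, 12 bits of cycleOffset) into a flat
 * count of 24.576 MHz ticks: one cycle is 3072 ticks and one second is
 * 8000 cycles.
 */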
cycle_timer_ticks(u32 cycle_timer)1657 static u32 cycle_timer_ticks(u32 cycle_timer)
1658 {
1659 u32 ticks;
1660
	ticks = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}

/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same, i.e.
 * less than twice the other. Furthermore, any negative difference indicates an
 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
 * execute, so we have enough precision to compute the ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	if (has_reboot_by_cycle_timer_read_quirk(ohci))
		return 0;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}

/*
 * This function has to be called at least every 64 seconds. The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if (unlikely(!ohci->bus_time_running)) {
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
		ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
				 (cycle_time_seconds & 0x40);
		ohci->bus_time_running = true;
	}

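	/*
	 * Bit 6 is the most significant bit of the 7-bit seconds field and
	 * thus flips every 64 seconds. A flip since the last call means the
	 * upper bits of bus_time must advance by one 64-second unit (0x40).
	 */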
	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}

static int get_status_for_port(struct fw_ohci *ohci, int port_index,
			       enum phy_packet_self_id_port_status *status)
{
	int reg;

	scoped_guard(mutex, &ohci->phy_reg_mutex) {
		reg = write_phy_reg(ohci, 7, port_index);
		if (reg < 0)
			return reg;

		reg = read_phy_reg(ohci, 8);
		if (reg < 0)
			return reg;
	}

	switch (reg & 0x0f) {
	case 0x06:
		// is child node (connected to parent node)
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_PARENT;
		break;
	case 0x0e:
		// is parent node (connected to child node)
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_CHILD;
		break;
	default:
		// not connected
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_NCONN;
		break;
	}

	return 0;
}

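/*
 * Find the insertion position that keeps the self ID buffer sorted by phy ID.
 * Returns -1 if an entry with the same phy ID is already present.
 */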
static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
			   int self_id_count)
{
	unsigned int left_phy_id = phy_packet_self_id_get_phy_id(self_id);
	int i;

	for (i = 0; i < self_id_count; i++) {
		u32 entry = ohci->self_id_buffer[i];
		unsigned int right_phy_id = phy_packet_self_id_get_phy_id(entry);

		if (left_phy_id == right_phy_id)
			return -1;
		if (left_phy_id < right_phy_id)
			return i;
	}
	return i;
}

static int detect_initiated_reset(struct fw_ohci *ohci, bool *is_initiated_reset)
{
	int reg;

	guard(mutex)(&ohci->phy_reg_mutex);

	// Select page 7
	reg = write_phy_reg(ohci, 7, 0xe0);
	if (reg < 0)
		return reg;

	reg = read_phy_reg(ohci, 8);
	if (reg < 0)
		return reg;

	// set PMODE bit
	reg |= 0x40;
	reg = write_phy_reg(ohci, 8, reg);
	if (reg < 0)
		return reg;

	// read register 12
	reg = read_phy_reg(ohci, 12);
	if (reg < 0)
		return reg;

	// bit 3 indicates "initiated reset"
	*is_initiated_reset = !!((reg & 0x08) == 0x08);

	return 0;
}

/*
 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
 * Construct the selfID from phy register contents.
 */
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
	int reg, i, pos, err;
	bool is_initiated_reset;
	u32 self_id = 0;

	// link active 1, speed 3, bridge 0, contender 1, more packets 0.
	phy_packet_set_packet_identifier(&self_id, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID);
	phy_packet_self_id_zero_set_link_active(&self_id, true);
	phy_packet_self_id_zero_set_scode(&self_id, SCODE_800);
	phy_packet_self_id_zero_set_contender(&self_id, true);

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return -EBUSY;
	}
	phy_packet_self_id_set_phy_id(&self_id, reg & 0x3f);

	reg = ohci_read_phy_reg(&ohci->card, 4);
	if (reg < 0)
		return reg;
	phy_packet_self_id_zero_set_power_class(&self_id, reg & 0x07);

	reg = ohci_read_phy_reg(&ohci->card, 1);
	if (reg < 0)
		return reg;
	phy_packet_self_id_zero_set_gap_count(&self_id, reg & 0x3f);

	for (i = 0; i < 3; i++) {
		enum phy_packet_self_id_port_status status;

		err = get_status_for_port(ohci, i, &status);
		if (err < 0)
			return err;

		self_id_sequence_set_port_status(&self_id, 1, i, status);
	}

	err = detect_initiated_reset(ohci, &is_initiated_reset);
	if (err < 0)
		return err;
	phy_packet_self_id_zero_set_initiated_reset(&self_id, is_initiated_reset);

	pos = get_self_id_pos(ohci, self_id, self_id_count);
	if (pos >= 0) {
		memmove(&(ohci->self_id_buffer[pos+1]),
			&(ohci->self_id_buffer[pos]),
			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
		ohci->self_id_buffer[pos] = self_id;
		self_id_count++;
	}
	return self_id_count;
}

static irqreturn_t handle_selfid_complete_event(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	int self_id_count, generation, new_generation, i, j;
	u32 reg, quadlet;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		goto end;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		ohci_notice(ohci, "malconfigured bus\n");
		goto end;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
	if (!(ohci->is_root && is_new_root))
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	ohci->is_root = is_new_root;

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (ohci1394_self_id_count_is_error(reg)) {
		ohci_notice(ohci, "self ID receive error\n");
		goto end;
	}

	trace_self_id_complete(ohci->card.index, reg, ohci->self_id, has_be_header_quirk(ohci));

	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer. Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = ohci1394_self_id_count_get_size(reg) >> 1;

	if (self_id_count > 252) {
		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
		goto end;
	}

	quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
	generation = ohci1394_self_id_receive_q0_get_generation(quadlet);
	rmb();

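	/*
	 * Each self ID occupies two quadlets in the receive buffer: the datum
	 * itself followed by its bitwise complement, which lets us validate
	 * every pair below.
	 */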
	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));

		if (id != ~id2) {
			/*
			 * If the invalid data looks like a cycle start packet,
			 * it's likely to be the result of the cycle master
			 * having a wrong gap count. In this case, the self IDs
			 * so far are valid and should be processed so that the
			 * bus manager can then correct the gap count.
			 */
			if (id == 0xffff008f) {
				ohci_notice(ohci, "ignoring spurious self IDs\n");
				self_id_count = j;
				break;
			}

			ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
				    j, self_id_count, id, id2);
			goto end;
		}
		ohci->self_id_buffer[j] = id;
	}

	if (ohci->quirks & QUIRK_TI_SLLZ059) {
		self_id_count = find_and_insert_self_id(ohci, self_id_count);
		if (self_id_count < 0) {
			ohci_notice(ohci,
				    "could not construct local self ID\n");
			goto end;
		}
	}

	if (self_id_count == 0) {
		ohci_notice(ohci, "no self IDs\n");
		goto end;
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read. The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer. If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data. The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out. If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	new_generation = ohci1394_self_id_count_get_generation(reg);
	if (new_generation != generation) {
		ohci_notice(ohci, "new bus reset, discarding self ids\n");
		goto end;
	}

	// FIXME: Document how the locking works.
	scoped_guard(spinlock_irq, &ohci->lock) {
		ohci->generation = -1; // prevent AT packet queueing
		context_stop(&ohci->at_request_ctx.context);
		context_stop(&ohci->at_response_ctx.context);
	}

	/*
	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
	 * packets in the AT queues and software needs to drain them.
	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
	 */
	at_context_flush(&ohci->at_request_ctx);
	at_context_flush(&ohci->at_response_ctx);

	scoped_guard(spinlock_irq, &ohci->lock) {
		ohci->generation = generation;
		reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

		if (ohci->quirks & QUIRK_RESET_PACKET)
			ohci->request_generation = generation;

		// This next bit is unrelated to the AT context stuff but we have to do it under the
		// spinlock also. If a new config rom was set up before this reset, the old one is
		// now no longer in use and we can free it. Update the config rom pointers to point
		// to the current config rom and clear the next_config_rom pointer so a new update
		// can take place.
		if (ohci->next_config_rom != NULL) {
			if (ohci->next_config_rom != ohci->config_rom) {
				free_rom = ohci->config_rom;
				free_rom_bus = ohci->config_rom_bus;
			}
			ohci->config_rom = ohci->next_config_rom;
			ohci->config_rom_bus = ohci->next_config_rom_bus;
			ohci->next_config_rom = NULL;

			// Restore config_rom image and manually update config_rom registers.
			// Writing the header quadlet will indicate that the config rom is ready,
			// so we do that last.
			reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2]));
			ohci->config_rom[0] = ohci->next_header;
			reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header));
		}

		if (param_remote_dma) {
			reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
			reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
		}
	}

	if (free_rom)
		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer,
				 ohci->csr_state_setclear_abdicate);
	ohci->csr_state_setclear_abdicate = false;
end:
	return IRQ_HANDLED;
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

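	/*
	 * All zeroes means no interrupt is pending; all ones means the card
	 * has been ejected (reads from unplugged PCI devices return ~0).
	 */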
	if (!event || !~event)
		return IRQ_NONE;

	/*
	 * busReset and postedWriteErr events must not be cleared yet
	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
	 */
	reg_write(ohci, OHCI1394_IntEventClear,
		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
	trace_irqs(ohci->card.index, event);

	// The busReset interrupt is masked here and unmasked again in
	// handle_selfid_complete_event(), which is scheduled by the selfID event.
	if (event & OHCI1394_busReset)
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

	if (event & OHCI1394_RQPkt)
		queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);

	if (event & OHCI1394_RSPkt)
		queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work);

	if (event & OHCI1394_reqTxComplete)
		queue_work(ohci->card.async_wq, &ohci->at_request_ctx.work);

	if (event & OHCI1394_respTxComplete)
		queue_work(ohci->card.async_wq, &ohci->at_response_ctx.work);

	if (event & OHCI1394_isochRx) {
		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base);
			iso_event &= ~(1 << i);
		}
	}

	if (event & OHCI1394_isochTx) {
		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base);
			iso_event &= ~(1 << i);
		}
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		ohci_err(ohci, "register access failure\n");

	if (unlikely(event & OHCI1394_postedWriteErr)) {
		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
		reg_write(ohci, OHCI1394_IntEventClear,
			  OHCI1394_postedWriteErr);
		dev_err_ratelimited(ohci->card.device, "PCI posted write error\n");
	}

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (unlikely(event & OHCI1394_cycleInconsistent)) {
		/*
		 * We need to clear this event bit in order to make
		 * cycleMatch isochronous I/O work. In theory we should
		 * stop active cycleMatch iso contexts now and restart
		 * them at least two cycles later. (FIXME?)
		 */
		dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n");
	}

	if (unlikely(event & OHCI1394_unrecoverableError))
		handle_dead_contexts(ohci);

	if (event & OHCI1394_cycle64Seconds) {
		guard(spinlock)(&ohci->lock);
		update_bus_time(ohci);
	} else
		flush_writes(ohci);

	if (event & OHCI1394_selfIDComplete)
		return IRQ_WAKE_THREAD;
	else
		return IRQ_HANDLED;
}

static int software_reset(struct fw_ohci *ohci)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
	for (i = 0; i < 500; i++) {
		val = reg_read(ohci, OHCI1394_HCControlSet);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_HCControl_softReset))
			return 0;

		msleep(1);
	}

	return -EBUSY;
}

static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
	size_t size = length * 4;

	memcpy(dest, src, size);
	if (size < CONFIG_ROM_SIZE)
		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}

static int configure_1394a_enhancements(struct fw_ohci *ohci)
{
	bool enable_1394a;
	int ret, clear, set, offset;

	/* Check if the driver should configure link and PHY. */
	if (!(reg_read(ohci, OHCI1394_HCControlSet) &
	      OHCI1394_HCControl_programPhyEnable))
		return 0;

	/* Paranoia: check whether the PHY supports 1394a, too. */
	enable_1394a = false;
	ret = read_phy_reg(ohci, 2);
	if (ret < 0)
		return ret;
	if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
		ret = read_paged_phy_reg(ohci, 1, 8);
		if (ret < 0)
			return ret;
		if (ret >= 1)
			enable_1394a = true;
	}

	if (ohci->quirks & QUIRK_NO_1394A)
		enable_1394a = false;

	/* Configure PHY and link consistently. */
	if (enable_1394a) {
		clear = 0;
		set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
	} else {
		clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
		set = 0;
	}
	ret = update_phy_reg(ohci, 5, clear, set);
	if (ret < 0)
		return ret;

	if (enable_1394a)
		offset = OHCI1394_HCControlSet;
	else
		offset = OHCI1394_HCControlClear;
	reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);

	/* Clean up: configuration has been taken care of. */
	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_programPhyEnable);

	return 0;
}

static int probe_tsb41ba3d(struct fw_ohci *ohci)
{
	/* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
	static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
	int reg, i;

	reg = read_phy_reg(ohci, 2);
	if (reg < 0)
		return reg;
	if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
		return 0;

	for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
		reg = read_paged_phy_reg(ohci, 1, i + 10);
		if (reg < 0)
			return reg;
		if (reg != id[i])
			return 0;
	}
	return 1;
}

static int ohci_enable(struct fw_card *card,
		       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 lps, version, irqs;
	int i, ret;

	ret = software_reset(ohci);
	if (ret < 0) {
		ohci_err(ohci, "failed to reset ohci card\n");
		return ret;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers. In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine. Wait 50msec to make sure we have
	 * full link enabled. However, with some cards (well, at least
	 * a JMicron PCIe card), we have to try again sometimes.
	 *
	 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
	 * cannot actually use the phy at that time. These need tens of
	 * milliseconds pause between LPS write and first phy access too.
	 */

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);

	for (lps = 0, i = 0; !lps && i < 3; i++) {
		msleep(50);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	if (!lps) {
		ohci_err(ohci, "failed to set Link Power Status\n");
		return -EIO;
	}

	if (ohci->quirks & QUIRK_TI_SLLZ059) {
		ret = probe_tsb41ba3d(ohci);
		if (ret < 0)
			return ret;
		if (ret)
			ohci_notice(ohci, "local TSB41BA3D phy\n");
		else
			ohci->quirks &= ~QUIRK_TI_SLLZ059;
	}

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
		  (200 << 16));

	ohci->bus_time_running = false;

	for (i = 0; i < 32; i++)
		if (ohci->ir_context_support & (1 << i))
			reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
				  IR_CONTEXT_MULTI_CHANNEL_MODE);

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	if (version >= OHCI_VERSION_1_1) {
		reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
			  0xfffffffe);
		card->broadcast_channel_auto_allocated = true;
	}

	/* Get implemented bits of the priority arbitration request counter. */
	reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
	ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
	reg_write(ohci, OHCI1394_FairnessControl, 0);
	card->priority_budget_implemented = ohci->pri_req_max != 0;

	reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);

	ret = configure_1394a_enhancements(ohci);
	if (ret < 0)
		return ret;

	/* Activate link_on bit and contender bit in our self ID packets. */
	ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
	if (ret < 0)
		return ret;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active. We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately. We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled. This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset work item.
	 */

	if (config_rom) {
		ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
							    &ohci->next_config_rom_bus, GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		copy_config_rom(ohci->next_config_rom, config_rom, length);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = ohci->next_config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
	       OHCI1394_RQPkt | OHCI1394_RSPkt |
	       OHCI1394_isochTx | OHCI1394_isochRx |
	       OHCI1394_postedWriteErr |
	       OHCI1394_selfIDComplete |
	       OHCI1394_regAccessFail |
	       OHCI1394_cycleInconsistent |
	       OHCI1394_unrecoverableError |
	       OHCI1394_cycleTooLong |
	       OHCI1394_masterIntEnable |
	       OHCI1394_busReset;
	reg_write(ohci, OHCI1394_IntMaskSet, irqs);

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_rcvPhyPkt);

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	flush_writes(ohci);

	/* We are ready to go, reset bus to finish initialization. */
	fw_schedule_bus_reset(&ohci->card, false, true);

	return 0;
}

static void ohci_disable(struct fw_card *card)
{
	struct pci_dev *pdev = to_pci_dev(card->device);
	struct fw_ohci *ohci = pci_get_drvdata(pdev);
	int i, irq = pci_irq_vector(pdev, 0);

	// If the removal is happening from the suspend state, LPS won't be enabled and host
	// registers (e.g., IntMaskClear) won't be accessible.
	if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS))
		return;

	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);

	if (irq >= 0)
		synchronize_irq(irq);

	flush_work(&ohci->ar_request_ctx.work);
	flush_work(&ohci->ar_response_ctx.work);
	flush_work(&ohci->at_request_ctx.work);
	flush_work(&ohci->at_response_ctx.work);

	for (i = 0; i < ohci->n_ir; ++i) {
		if (!(ohci->ir_context_mask & BIT(i)))
			flush_work(&ohci->ir_context_list[i].base.work);
	}
	for (i = 0; i < ohci->n_it; ++i) {
		if (!(ohci->it_context_mask & BIT(i)))
			flush_work(&ohci->it_context_list[i].base.work);
	}

	at_context_flush(&ohci->at_request_ctx);
	at_context_flush(&ohci->at_response_ctx);
}

static int ohci_set_config_rom(struct fw_card *card,
			       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use. See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place. When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register. All automatically and atomically.
	 *
	 * Now, there's a twist to this story. The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values into these registers
	 * during the atomic update, even on little endian
	 * architectures. The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet. In the bus reset work item we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see handle_selfid_complete_event).
	 */

	next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					      &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	scoped_guard(spinlock_irq, &ohci->lock) {
		// If there is not an already pending config_rom update, push our new allocation
		// into the ohci->next_config_rom and then mark the local variable as null so that
		// we won't deallocate the new buffer.
		//
		// OTOH, if there is a pending config_rom update, just use that buffer with the new
		// config_rom data, and let this routine free the unused DMA allocation.
		if (ohci->next_config_rom == NULL) {
			ohci->next_config_rom = next_config_rom;
			ohci->next_config_rom_bus = next_config_rom_bus;
			next_config_rom = NULL;
		}

		copy_config_rom(ohci->next_config_rom, config_rom, length);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
	}

	/* If we didn't use the DMA allocation, delete it. */
	if (next_config_rom != NULL) {
		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
				   next_config_rom_bus);
	}

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect. We clean up the old config rom memory and DMA
	 * mappings in the bus reset work item, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */

	fw_schedule_bus_reset(&ohci->card, true, true);

	return 0;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct at_context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int ret = -ENOENT;

	// Avoid deadlock due to programming mistake.
	if (WARN_ON_ONCE(current_work() == &ctx->work))
		return 0;
	disable_work_sync(&ctx->work);

	if (packet->ack != 0)
		goto out;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;

	// Timestamping on behalf of the hardware.
	packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));

	packet->callback(packet, &ohci->card, packet->ack);
	ret = 0;
out:
	enable_work(&ctx->work);

	return ret;
}

static int ohci_enable_phys_dma(struct fw_card *card,
				int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int n, ret = 0;

	if (param_remote_dma)
		return 0;

	/*
	 * FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
	 */

	guard(spinlock_irqsave)(&ohci->lock);

	if (ohci->generation != generation)
		return -ESTALE;

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);

	return ret;
}

static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 value;

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
		if (ohci->is_root &&
		    (reg_read(ohci, OHCI1394_LinkControlSet) &
		     OHCI1394_LinkControl_cycleMaster))
			value = CSR_STATE_BIT_CMSTR;
		else
			value = 0;
		if (ohci->csr_state_setclear_abdicate)
			value |= CSR_STATE_BIT_ABDICATE;

		return value;

	case CSR_NODE_IDS:
		return reg_read(ohci, OHCI1394_NodeID) << 16;

	case CSR_CYCLE_TIME:
		return get_cycle_time(ohci);

	case CSR_BUS_TIME:
	{
		// We might be called just after the cycle timer has wrapped around but just before
		// the cycle64Seconds handler, so we better check here, too, if the bus time needs
		// to be updated.

		guard(spinlock_irqsave)(&ohci->lock);
		return update_bus_time(ohci);
	}
	case CSR_BUSY_TIMEOUT:
		value = reg_read(ohci, OHCI1394_ATRetries);
		return (value >> 4) & 0x0ffff00f;

	case CSR_PRIORITY_BUDGET:
		return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
		       (ohci->pri_req_max << 8);

	default:
		WARN_ON(1);
		return 0;
	}
}

static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
	struct fw_ohci *ohci = fw_ohci(card);

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = false;
		break;

	case CSR_STATE_SET:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlSet,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = true;
		break;

	case CSR_NODE_IDS:
		reg_write(ohci, OHCI1394_NodeID, value >> 16);
		flush_writes(ohci);
		break;

	case CSR_CYCLE_TIME:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
		reg_write(ohci, OHCI1394_IntEventSet,
			  OHCI1394_cycleInconsistent);
		flush_writes(ohci);
		break;

	case CSR_BUS_TIME:
	{
		guard(spinlock_irqsave)(&ohci->lock);
		ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f);
		break;
	}
	case CSR_BUSY_TIMEOUT:
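		// Fan the 4-bit CSR retry limit out into the three OHCI retry fields and
		// shift the remaining CSR bits up to bits 16 and above of ATRetries, the
		// inverse of the mapping used by ohci_read_csr() above.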
		value = (value & 0xf) | ((value & 0xf) << 4) |
			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
		reg_write(ohci, OHCI1394_ATRetries, value);
		flush_writes(ohci);
		break;

	case CSR_PRIORITY_BUDGET:
		reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
		flush_writes(ohci);
		break;

	default:
		WARN_ON(1);
		break;
	}
}

static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause)
{
	trace_isoc_inbound_single_completions(&ctx->base, ctx->sc.last_timestamp, cause,
					      ctx->sc.header, ctx->sc.header_length);
	trace_isoc_outbound_completions(&ctx->base, ctx->sc.last_timestamp, cause, ctx->sc.header,
					ctx->sc.header_length);

	ctx->base.callback.sc(&ctx->base, ctx->sc.last_timestamp, ctx->sc.header_length,
			      ctx->sc.header, ctx->base.callback_data);
	ctx->sc.header_length = 0;
}

static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
{
	u32 *ctx_hdr;

	if (ctx->sc.header_length + ctx->base.header_size > ctx->base.header_storage_size) {
		if (ctx->base.flags & FW_ISO_CONTEXT_FLAG_DROP_OVERFLOW_HEADERS)
			return;
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
	}

	ctx_hdr = ctx->sc.header + ctx->sc.header_length;
	ctx->sc.last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);

	/*
	 * The two iso header quadlets are byteswapped to little
	 * endian by the controller, but we want to present them
	 * as big endian for consistency with the bus endianness.
	 */
	if (ctx->base.header_size > 0)
		ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
	if (ctx->base.header_size > 4)
		ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
	if (ctx->base.header_size > 8)
		memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
	ctx->sc.header_length += ctx->base.header_size;
}

static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	u32 buffer_dma;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
		d++;
		buffer_dma = le32_to_cpu(d->data_address);
		dma_sync_single_range_for_cpu(context->ohci->card.device,
					      buffer_dma & PAGE_MASK,
					      buffer_dma & ~PAGE_MASK,
					      le16_to_cpu(d->req_count),
					      DMA_FROM_DEVICE);
	}

	copy_iso_headers(ctx, (u32 *) (last + 1));

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);

	return 1;
}

/* d == last because each descriptor block is only a single descriptor. */
static int handle_ir_buffer_fill(struct context *context,
				 struct descriptor *d,
				 struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	unsigned int req_count, res_count, completed;
	u32 buffer_dma;

	req_count = le16_to_cpu(last->req_count);
	res_count = le16_to_cpu(READ_ONCE(last->res_count));
	completed = req_count - res_count;
	buffer_dma = le32_to_cpu(last->data_address);

	if (completed > 0) {
		ctx->mc.buffer_bus = buffer_dma;
		ctx->mc.completed = completed;
	}

	if (res_count != 0)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	dma_sync_single_range_for_cpu(context->ohci->card.device,
				      buffer_dma & PAGE_MASK,
				      buffer_dma & ~PAGE_MASK,
				      completed, DMA_FROM_DEVICE);

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
		trace_isoc_inbound_multiple_completions(&ctx->base, completed,
							FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);

		ctx->base.callback.mc(&ctx->base,
				      buffer_dma + completed,
				      ctx->base.callback_data);
		ctx->mc.completed = 0;
	}

	return 1;
}

static void flush_ir_buffer_fill(struct iso_context *ctx)
{
	dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
				      ctx->mc.buffer_bus & PAGE_MASK,
				      ctx->mc.buffer_bus & ~PAGE_MASK,
				      ctx->mc.completed, DMA_FROM_DEVICE);

	trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc.completed,
						FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);

	ctx->base.callback.mc(&ctx->base, ctx->mc.buffer_bus + ctx->mc.completed,
			      ctx->base.callback_data);
	ctx->mc.completed = 0;
}

static inline void sync_it_packet_for_cpu(struct context *context,
					  struct descriptor *pd)
{
	__le16 control;
	u32 buffer_dma;

	/* only packets beginning with OUTPUT_MORE* have data buffers */
	if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return;

	/* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
	pd += 2;

	/*
	 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
	 * data buffer is in the context program's coherent page and must not
	 * be synced.
	 */
	if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
	    (context->current_bus & PAGE_MASK)) {
		if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
			return;
		pd++;
	}

	do {
		buffer_dma = le32_to_cpu(pd->data_address);
		dma_sync_single_range_for_cpu(context->ohci->card.device,
					      buffer_dma & PAGE_MASK,
					      buffer_dma & ~PAGE_MASK,
					      le16_to_cpu(pd->req_count),
					      DMA_TO_DEVICE);
		control = pd->control;
		pd++;
	} while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
}

static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__be32 *ctx_hdr;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	sync_it_packet_for_cpu(context, d);

	if (ctx->sc.header_length + 4 > ctx->base.header_storage_size) {
		if (ctx->base.flags & FW_ISO_CONTEXT_FLAG_DROP_OVERFLOW_HEADERS)
			return 1;
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
	}

	ctx_hdr = ctx->sc.header + ctx->sc.header_length;
	ctx->sc.last_timestamp = le16_to_cpu(last->res_count);
	/* Present this value as big-endian to match the receive code */
	*ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
			       le16_to_cpu(pd->res_count));
	ctx->sc.header_length += 4;

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);

	return 1;
}

static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
{
	u32 hi = channels >> 32, lo = channels;

	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
	ohci->mc_channels = channels;
}

static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, int type, int channel,
							size_t header_size, size_t header_storage_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	void *header __free(kvfree) = NULL;
	struct iso_context *ctx;
	descriptor_callback_t callback;
	u64 *channels;
	u32 *mask, regs;
	int index, ret = -EBUSY;

	scoped_guard(spinlock_irq, &ohci->lock) {
		switch (type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			mask = &ohci->it_context_mask;
			callback = handle_it_packet;
			index = ffs(*mask) - 1;
			if (index >= 0) {
				*mask &= ~(1 << index);
				regs = OHCI1394_IsoXmitContextBase(index);
				ctx = &ohci->it_context_list[index];
			}
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			channels = &ohci->ir_context_channels;
			mask = &ohci->ir_context_mask;
			callback = handle_ir_packet_per_buffer;
			index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
			if (index >= 0) {
				*channels &= ~(1ULL << channel);
				*mask &= ~(1 << index);
				regs = OHCI1394_IsoRcvContextBase(index);
				ctx = &ohci->ir_context_list[index];
			}
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			mask = &ohci->ir_context_mask;
			callback = handle_ir_buffer_fill;
			index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
			if (index >= 0) {
				ohci->mc_allocated = true;
				*mask &= ~(1 << index);
				regs = OHCI1394_IsoRcvContextBase(index);
				ctx = &ohci->ir_context_list[index];
			}
			break;

		default:
			index = -1;
			ret = -ENOSYS;
		}

		if (index < 0)
			return ERR_PTR(ret);
	}

	memset(ctx, 0, sizeof(*ctx));

	if (type != FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
		ctx->sc.header_length = 0;
		header = kvmalloc(header_storage_size, GFP_KERNEL);
		if (!header) {
			ret = -ENOMEM;
			goto out;
		}
	}

	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out;
	fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);

	if (type != FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
		ctx->sc.header = no_free_ptr(header);
	} else {
		set_multichannel_mask(ohci, 0);
		ctx->mc.completed = 0;
	}

	return &ctx->base;
out:
	scoped_guard(spinlock_irq, &ohci->lock) {
		switch (type) {
		case FW_ISO_CONTEXT_RECEIVE:
			*channels |= 1ULL << channel;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			ohci->mc_allocated = false;
			break;
		}
		*mask |= 1 << index;
	}

	return ERR_PTR(ret);
}

static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
	int index;

	/* the controller cannot start without any queued packets */
	if (ctx->context.last->branch_address == 0)
		return -ENODATA;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
		fallthrough;
	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);

		ctx->sync = sync;
		ctx->tags = tags;

		break;
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		break;

	case FW_ISO_CONTEXT_RECEIVE:
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		break;
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);

	if (base->type != FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
		kvfree(ctx->sc.header);
		ctx->sc.header = NULL;
	}

	guard(spinlock_irqsave)(&ohci->lock);

	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= ohci->mc_channels;
		ohci->mc_channels = 0;
		ohci->mc_allocated = false;
		break;
	}
}

static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
	struct fw_ohci *ohci = fw_ohci(base->card);

	switch (base->type) {
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
	{
		guard(spinlock_irqsave)(&ohci->lock);

		// Don't allow multichannel to grab other contexts' channels.
		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
			*channels = ohci->ir_context_channels;
			return -EBUSY;
		} else {
			set_multichannel_mask(ohci, *channels);
			return 0;
		}
	}
	default:
		return -EINVAL;
	}
}

static void __maybe_unused ohci_resume_iso_dma(struct fw_ohci *ohci)
{
	int i;
	struct iso_context *ctx;

	for (i = 0 ; i < ohci->n_ir ; i++) {
		ctx = &ohci->ir_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}

	for (i = 0 ; i < ohci->n_it ; i++) {
		ctx = &ohci->it_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}
}

static int queue_iso_transmit(struct iso_context *ctx,
			      struct fw_iso_packet *packet,
			      struct fw_iso_buffer *buffer,
			      unsigned long payload)
{
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;
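	/*
	 * Example: a non-skip packet with a header and a payload spanning two
	 * pages needs z = 2 + 1 + 2 = 5 descriptors: two for the immediate
	 * quadlets, one for the header, and one per payload page.
	 */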
3240
3241 /* Get header size in number of descriptors. */
3242 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
3243
3244 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
3245 if (d == NULL)
3246 return -ENOMEM;
3247
3248 if (!p->skip) {
3249 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
3250 d[0].req_count = cpu_to_le16(8);
3251 /*
3252 * Link the skip address to this descriptor itself. This causes
3253 * a context to skip a cycle whenever lost cycles or FIFO
3254 * overruns occur, without dropping the data. The application
3255 * should then decide whether this is an error condition or not.
3256 * FIXME: Make the context's cycle-lost behaviour configurable?
3257 */
3258 d[0].branch_address = cpu_to_le32(d_bus | z);
3259
3260 header = (__le32 *) &d[1];
3261
3262 ohci1394_it_data_set_speed(header, ctx->base.speed);
3263 ohci1394_it_data_set_tag(header, p->tag);
3264 ohci1394_it_data_set_channel(header, ctx->base.channel);
3265 ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
3266 ohci1394_it_data_set_sync(header, p->sy);
3267
3268 ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length);
3269 }
3270
3271 if (p->header_length > 0) {
3272 d[2].req_count = cpu_to_le16(p->header_length);
3273 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
3274 memcpy(&d[z], p->header, p->header_length);
3275 }
3276
3277 pd = d + z - payload_z;
3278 payload_end_index = payload_index + p->payload_length;
3279 for (i = 0; i < payload_z; i++) {
3280 page = payload_index >> PAGE_SHIFT;
3281 offset = payload_index & ~PAGE_MASK;
3282 next_page_index = (page + 1) << PAGE_SHIFT;
3283 length =
3284 min(next_page_index, payload_end_index) - payload_index;
3285 pd[i].req_count = cpu_to_le16(length);
3286
3287 dma_addr_t dma_addr = buffer->dma_addrs[page];
3288 pd[i].data_address = cpu_to_le32(dma_addr + offset);
3289
3290 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3291 dma_addr, offset, length,
3292 DMA_TO_DEVICE);
3293
3294 payload_index += length;
3295 }
3296
3297 if (p->interrupt)
3298 irq = DESCRIPTOR_IRQ_ALWAYS;
3299 else
3300 irq = DESCRIPTOR_NO_IRQ;
3301
3302 last = z == 2 ? d : d + z - 1;
3303 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
3304 DESCRIPTOR_STATUS |
3305 DESCRIPTOR_BRANCH_ALWAYS |
3306 irq);
3307
3308 context_append(&ctx->context, d, z, header_z);
3309
3310 return 0;
3311 }
3312
queue_iso_packet_per_buffer(struct iso_context * ctx,struct fw_iso_packet * packet,struct fw_iso_buffer * buffer,unsigned long payload)3313 static int queue_iso_packet_per_buffer(struct iso_context *ctx,
3314 struct fw_iso_packet *packet,
3315 struct fw_iso_buffer *buffer,
3316 unsigned long payload)
3317 {
3318 struct device *device = ctx->context.ohci->card.device;
3319 struct descriptor *d, *pd;
3320 dma_addr_t d_bus;
3321 u32 z, header_z, rest;
3322 int i, j, length;
3323 int page, offset, packet_count, header_size, payload_per_buffer;
3324
3325 /*
3326 * The OHCI controller puts the isochronous header and trailer in the
3327 * buffer, so we need at least 8 bytes.
3328 */
3329 packet_count = packet->header_length / ctx->base.header_size;
3330 header_size = max(ctx->base.header_size, (size_t)8);
3331
3332 /* Get header size in number of descriptors. */
3333 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
3334 page = payload >> PAGE_SHIFT;
3335 offset = payload & ~PAGE_MASK;
3336 payload_per_buffer = packet->payload_length / packet_count;
3337
3338 for (i = 0; i < packet_count; i++) {
3339 /* d points to the header descriptor */
3340 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
3341 d = context_get_descriptors(&ctx->context,
3342 z + header_z, &d_bus);
3343 if (d == NULL)
3344 return -ENOMEM;
3345
3346 d->control = cpu_to_le16(DESCRIPTOR_STATUS |
3347 DESCRIPTOR_INPUT_MORE);
3348 if (packet->skip && i == 0)
3349 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3350 d->req_count = cpu_to_le16(header_size);
3351 d->res_count = d->req_count;
3352 d->transfer_status = 0;
3353 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
3354
3355 rest = payload_per_buffer;
3356 pd = d;
3357 for (j = 1; j < z; j++) {
3358 pd++;
3359 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3360 DESCRIPTOR_INPUT_MORE);
3361
3362 if (offset + rest < PAGE_SIZE)
3363 length = rest;
3364 else
3365 length = PAGE_SIZE - offset;
3366 pd->req_count = cpu_to_le16(length);
3367 pd->res_count = pd->req_count;
3368 pd->transfer_status = 0;
3369
3370 dma_addr_t dma_addr = buffer->dma_addrs[page];
3371 pd->data_address = cpu_to_le32(dma_addr + offset);
3372
3373 dma_sync_single_range_for_device(device, dma_addr,
3374 offset, length,
3375 DMA_FROM_DEVICE);
3376
3377 offset = (offset + length) & ~PAGE_MASK;
3378 rest -= length;
3379 if (offset == 0)
3380 page++;
3381 }
3382 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3383 DESCRIPTOR_INPUT_LAST |
3384 DESCRIPTOR_BRANCH_ALWAYS);
3385 if (packet->interrupt && i == packet_count - 1)
3386 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3387
3388 context_append(&ctx->context, d, z, header_z);
3389 }
3390
3391 return 0;
3392 }
3393
queue_iso_buffer_fill(struct iso_context * ctx,struct fw_iso_packet * packet,struct fw_iso_buffer * buffer,unsigned long payload)3394 static int queue_iso_buffer_fill(struct iso_context *ctx,
3395 struct fw_iso_packet *packet,
3396 struct fw_iso_buffer *buffer,
3397 unsigned long payload)
3398 {
3399 struct descriptor *d;
3400 dma_addr_t d_bus;
3401 int page, offset, rest, z, i, length;
3402
3403 page = payload >> PAGE_SHIFT;
3404 offset = payload & ~PAGE_MASK;
3405 rest = packet->payload_length;
3406
3407 /* We need one descriptor for each page in the buffer. */
3408 z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
3409
3410 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
3411 return -EFAULT;
3412
3413 for (i = 0; i < z; i++) {
3414 d = context_get_descriptors(&ctx->context, 1, &d_bus);
3415 if (d == NULL)
3416 return -ENOMEM;
3417
3418 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
3419 DESCRIPTOR_BRANCH_ALWAYS);
3420 if (packet->skip && i == 0)
3421 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3422 if (packet->interrupt && i == z - 1)
3423 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3424
3425 if (offset + rest < PAGE_SIZE)
3426 length = rest;
3427 else
3428 length = PAGE_SIZE - offset;
3429 d->req_count = cpu_to_le16(length);
3430 d->res_count = d->req_count;
3431 d->transfer_status = 0;
3432
3433 dma_addr_t dma_addr = buffer->dma_addrs[page];
3434 d->data_address = cpu_to_le32(dma_addr + offset);
3435
3436 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3437 dma_addr, offset, length,
3438 DMA_FROM_DEVICE);
3439
3440 rest -= length;
3441 offset = 0;
3442 page++;
3443
3444 context_append(&ctx->context, d, 1, 0);
3445 }
3446
3447 return 0;
3448 }
3449
static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);

	guard(spinlock_irqsave)(&ctx->context.ohci->lock);

	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		return queue_iso_transmit(ctx, packet, buffer, payload);
	case FW_ISO_CONTEXT_RECEIVE:
		return queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		return queue_iso_buffer_fill(ctx, packet, buffer, payload);
	default:
		return -ENOSYS;
	}
}

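/*
 * Kick the context so the controller re-fetches descriptors that were
 * appended after it last went idle.  Writing CONTEXT_WAKE to the set
 * register is cheap and harmless even if the context is still running.
 */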
static void ohci_flush_queue_iso(struct fw_iso_context *base)
{
	struct context *ctx =
		&container_of(base, struct iso_context, base)->context;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

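/*
 * Process any pending completions synchronously.  The flushing_completions
 * bit keeps this reentrancy-safe: a caller that loses the race simply
 * returns 0 while another thread is already flushing.
 */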
static int ohci_flush_iso_completions(struct fw_iso_context *base)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int ret = 0;

	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
		ohci_isoc_context_work(&base->work);

		switch (base->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
		case FW_ISO_CONTEXT_RECEIVE:
			if (ctx->sc.header_length != 0)
				flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
			break;
		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (ctx->mc.completed != 0)
				flush_ir_buffer_fill(ctx);
			break;
		default:
			ret = -ENOSYS;
		}

		clear_bit_unlock(0, &ctx->flushing_completions);
		smp_mb__after_atomic();
	}

	return ret;
}

static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.disable		= ohci_disable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.flush_queue_iso	= ohci_flush_queue_iso,
	.flush_iso_completions	= ohci_flush_iso_completions,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

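/*
 * On PowerMacs, the FireWire PHY/link cell and its cable power must be
 * switched on explicitly via platform feature calls before the chip can be
 * used, and switched off again to save power when the driver lets go of
 * the device.
 */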
#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void pmac_ohci_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */

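/*
 * Devres release callback for the fw_ohci allocation made in pci_probe().
 * It runs on device detach, after pci_remove(), and undoes the setup that
 * is not covered by other managed (devm/pcim) resources.
 */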
static void release_ohci(struct device *dev, void *data)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct fw_ohci *ohci = pci_get_drvdata(pdev);

	pmac_ohci_off(pdev);

	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);

	dev_notice(dev, "removed fw-ohci device\n");
}

static int pci_probe(struct pci_dev *dev,
		     const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int i, flags, irq, err;

	if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
		return -ENOSYS;
	}

	ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL)
		return -ENOMEM;
	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
	pci_set_drvdata(dev, ohci);
	pmac_ohci_on(dev);
	devres_add(&dev->dev, ohci);

	err = pcim_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "failed to enable OHCI hardware\n");
		return err;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	spin_lock_init(&ohci->lock);
	mutex_init(&ohci->phy_reg_mutex);

	if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
		ohci_err(ohci, "invalid MMIO resource\n");
		return -ENXIO;
	}

	ohci->registers = pcim_iomap_region(dev, 0, ohci_driver_name);
	if (IS_ERR(ohci->registers)) {
		ohci_err(ohci, "failed to request and map MMIO resource\n");
		return -ENXIO;
	}

	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
		if ((ohci_quirks[i].vendor == dev->vendor) &&
		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].device == dev->device) &&
		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].revision >= dev->revision)) {
			ohci->quirks = ohci_quirks[i].flags;
			break;
		}
	if (param_quirks)
		ohci->quirks = param_quirks;

	if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
		ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;

	/*
	 * Because dma_alloc_coherent() allocates at least one page,
	 * we save space by using a common buffer for the AR request/
	 * response descriptors and the self IDs buffer.
	 */
	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
	ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
						GFP_KERNEL);
	if (!ohci->misc_buffer)
		return -ENOMEM;

	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
			      OHCI1394_AsReqRcvContextControlSet);
	if (err < 0)
		return err;

	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
			      OHCI1394_AsRspRcvContextControlSet);
	if (err < 0)
		return err;

	err = context_init(&ohci->at_request_ctx.context, ohci,
			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
	if (err < 0)
		return err;
	INIT_WORK(&ohci->at_request_ctx.work, ohci_at_context_work);

	err = context_init(&ohci->at_response_ctx.context, ohci,
			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
	if (err < 0)
		return err;
	INIT_WORK(&ohci->at_response_ctx.work, ohci_at_context_work);

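	/*
	 * Discover how many isochronous contexts the controller actually
	 * implements: setting all bits in the interrupt mask and reading
	 * the register back leaves only the bits the hardware supports.
	 */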
	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	ohci->ir_context_mask = ohci->ir_context_support;
	ohci->n_ir = hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = devm_kcalloc(&dev->dev, ohci->n_ir, sizeof(struct iso_context),
					     GFP_KERNEL);
	if (!ohci->ir_context_list)
		return -ENOMEM;

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	/* JMicron JMB38x often shows 0 at first read, just ignore it */
	if (!ohci->it_context_support) {
		ohci_notice(ohci, "overriding IsoXmitIntMask\n");
		ohci->it_context_support = 0xf;
	}
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	ohci->it_context_mask = ohci->it_context_support;
	ohci->n_it = hweight32(ohci->it_context_mask);
	ohci->it_context_list = devm_kcalloc(&dev->dev, ohci->n_it, sizeof(struct iso_context),
					     GFP_KERNEL);
	if (!ohci->it_context_list)
		return -ENOMEM;

	ohci->self_id     = ohci->misc_buffer     + PAGE_SIZE/2;
	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

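	/*
	 * Prefer a single MSI vector where the hardware is known to handle
	 * it (QUIRK_NO_MSI blacklists the rest); otherwise fall back to a
	 * shared legacy INTx line.
	 */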
	flags = PCI_IRQ_INTX;
	if (!(ohci->quirks & QUIRK_NO_MSI))
		flags |= PCI_IRQ_MSI;
	err = pci_alloc_irq_vectors(dev, 1, 1, flags);
	if (err < 0)
		return err;
	irq = pci_irq_vector(dev, 0);
	if (irq < 0) {
		err = irq;
		goto fail_msi;
	}

	// IRQF_ONESHOT is not used so that other events can still be handled in the hardIRQ
	// handler while the threaded IRQ handler runs for the SelfIDComplete event.
	err = request_threaded_irq(irq, irq_handler, handle_selfid_complete_event,
				   pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
				   ohci);
	if (err < 0) {
		ohci_err(ohci, "failed to allocate interrupt %d\n", irq);
		goto fail_msi;
	}

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir);
	if (err)
		goto fail_irq;

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	ohci_notice(ohci,
		    "added OHCI v%x.%x device as card %d, "
		    "%d IR + %d IT contexts, quirks 0x%x%s\n",
		    version >> 16, version & 0xff, ohci->card.index,
		    ohci->n_ir, ohci->n_it, ohci->quirks,
		    reg_read(ohci, OHCI1394_PhyUpperBound) ?
			", physUB" : "");

	return 0;

 fail_irq:
	free_irq(irq, ohci);
 fail_msi:
	pci_free_irq_vectors(dev);

	return err;
}

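/*
 * Teardown mirrors the tail of pci_probe(): detach the card from the core,
 * quiesce the controller, then release the interrupt.  Everything else is
 * device-managed and is cleaned up automatically, ending in release_ohci().
 */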
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int irq;

	fw_core_remove_card(&ohci->card);

	software_reset(ohci);

	irq = pci_irq_vector(dev, 0);
	if (irq >= 0)
		free_irq(irq, ohci);
	pci_free_irq_vectors(dev);

	dev_notice(&dev->dev, "removing fw-ohci device\n");
}

static int __maybe_unused pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct fw_ohci *ohci = pci_get_drvdata(pdev);

	software_reset(ohci);
	pmac_ohci_off(pdev);

	return 0;
}

static int __maybe_unused pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct fw_ohci *ohci = pci_get_drvdata(pdev);
	int err;

	pmac_ohci_on(pdev);

	/* Some systems don't set up the GUID register on resume from RAM */
	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
	    !reg_read(ohci, OHCI1394_GUIDHi)) {
		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
	}

	err = ohci_enable(&ohci->card, NULL, 0);
	if (err)
		return err;

	ohci_resume_iso_dma(ohci);

	return 0;
}

static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static SIMPLE_DEV_PM_OPS(pci_pm_ops, pci_suspend, pci_resume);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
	.driver.pm	= &pci_pm_ops,
};

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
MODULE_ALIAS("ohci1394");