/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _BRCM_DMA_H_
#define _BRCM_DMA_H_

#include <linux/delay.h>
#include <linux/skbuff.h>
#include "types.h"		/* forward structure declarations */

/* map/unmap direction */
#define DMA_TX	1		/* TX direction for DMA */
#define DMA_RX	2		/* RX direction for DMA */

/* DMA structure:
 * supports two DMA engines: 32-bit or 64-bit addressing
 * the basic DMA register set is per channel (transmit or receive)
 * a pair of channels is defined for convenience
 */

/* 32-bit addressing */

struct dma32diag {		/* diag access */
	u32 fifoaddr;		/* diag address */
	u32 fifodatalow;	/* low 32 bits of data */
	u32 fifodatahigh;	/* high 32 bits of data */
	u32 pad;		/* reserved */
};

/* 64-bit addressing */

/* dma registers per channel (xmt or rcv) */
struct dma64regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* desc ring base address low 32 bits (8K aligned) */
	u32 addrhigh;	/* desc ring base address bits 63:32 (8K aligned) */
	u32 status0;	/* current descriptor, xmt state */
	u32 status1;	/* active descriptor, xmt error */
};

/* range param for dma_getnexttxp() and dma_txreclaim() */
enum txd_range {
	DMA_RANGE_ALL = 1,	/* all descriptors posted to the ring */
	DMA_RANGE_TRANSMITTED,	/* descriptors hw reports as transmitted */
	DMA_RANGE_TRANSFERED	/* descriptors the engine has transferred */
};

/*
 * Exported data structure (read-only)
 */
struct dma_pub {
	uint txavail;		/* # free tx descriptors */
	uint dmactrlflags;	/* dma control flags */

	/* rx error counters */
	uint rxgiants;		/* rx giant frames */
	uint rxnobuf;		/* rx out of dma descriptors */
	/* tx error counters */
	uint txnobuf;		/* tx out of dma descriptors */
};

struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
			   uint txregbase, uint rxregbase,
			   uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset);
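
/*
 * A minimal attach/teardown sketch (hypothetical caller; the "wl0" name
 * and the ring-size/register-base variables below are illustrative, not
 * values this header defines):
 *
 *	struct dma_pub *di;
 *
 *	di = dma_attach("wl0", wlc, txregbase, rxregbase,
 *			ntxd, nrxd, rxbufsize, 0, nrxpost, rxoffset);
 *	if (di == NULL)
 *		return -ENOMEM;
 *
 *	dma_txinit(di);
 *	dma_rxinit(di);
 *	dma_rxfill(di);		post receive buffers
 *	...
 *	dma_detach(di);
 */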

void dma_rxinit(struct dma_pub *pub);
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
void dma_txinit(struct dma_pub *pub);
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
	       struct sk_buff *p0);
int dma_txpending(struct dma_pub *pub);
void dma_kick_tx(struct dma_pub *pub);
void dma_txsuspend(struct dma_pub *pub);
bool dma_txsuspended(struct dma_pub *pub);
void dma_txresume(struct dma_pub *pub);
void dma_txreclaim(struct dma_pub *pub, enum txd_range range);
void dma_rxreclaim(struct dma_pub *pub);
void dma_detach(struct dma_pub *pub);
unsigned long dma_getvar(struct dma_pub *pub, const char *name);
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
void dma_counterreset(struct dma_pub *pub);
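
/*
 * A tx-completion sketch: reclaim packets whose descriptors the hardware
 * reports as transmitted (the freeing step is illustrative; a real driver
 * may complete the frame to the stack instead):
 *
 *	struct sk_buff *p;
 *
 *	while ((p = dma_getnexttxp(di, DMA_RANGE_TRANSMITTED)) != NULL)
 *		dev_kfree_skb(p);
 */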

void dma_walk_packets(struct dma_pub *dmah,
		      void (*callback_fnc)(void *pkt, void *arg_a),
		      void *arg_a);
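
/*
 * Example callback for dma_walk_packets() (hypothetical; what is done with
 * each still-queued packet is driver-specific):
 *
 *	static void pkt_cb(void *pkt, void *arg_a)
 *	{
 *		struct sk_buff *skb = (struct sk_buff *)pkt;
 *		... inspect or update the queued skb ...
 *	}
 *
 *	dma_walk_packets(di, pkt_cb, NULL);
 */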

/*
 * On bcm47xx chips a DMA bug can mark a received packet as ready before
 * the DMA engine has written the packet length back to the buffer. The
 * workaround is to spin until the length becomes non-zero, staying off
 * the bus so the DMA engine can complete the update.
 */
static inline void dma_spin_for_len(uint len, struct sk_buff *head)
{
#if defined(CONFIG_BCM47XX)
	if (!len) {
		/* read the length word through an uncached (KSEG1) mapping
		 * so each poll sees the DMA engine's latest write
		 */
		while (!(len = *(u16 *) KSEG1ADDR(head->data)))
			udelay(1);

		/* store the recovered length back in frame byte order */
		*(u16 *) (head->data) = cpu_to_le16((u16) len);
	}
#endif /* defined(CONFIG_BCM47XX) */
}
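
/*
 * A minimal rx-path sketch (hypothetical; 'p' is a completed receive
 * buffer whose first 16 bits hold the frame length, and the spin is a
 * no-op unless that length read back as zero):
 *
 *	len = le16_to_cpu(*(__le16 *)(p->data));
 *	dma_spin_for_len(len, p);
 */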

#endif /* _BRCM_DMA_H_ */