xref: /linux/drivers/net/wwan/t7xx/t7xx_hif_cldma.h (revision 39d439047f1dc88f98b755d6f3a53a4ef8f0de21)
/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 */
17*39d43904SHaijun Liu 
18*39d43904SHaijun Liu #ifndef __T7XX_HIF_CLDMA_H__
19*39d43904SHaijun Liu #define __T7XX_HIF_CLDMA_H__
20*39d43904SHaijun Liu 
21*39d43904SHaijun Liu #include <linux/bits.h>
22*39d43904SHaijun Liu #include <linux/device.h>
23*39d43904SHaijun Liu #include <linux/dmapool.h>
24*39d43904SHaijun Liu #include <linux/pci.h>
25*39d43904SHaijun Liu #include <linux/skbuff.h>
26*39d43904SHaijun Liu #include <linux/spinlock.h>
27*39d43904SHaijun Liu #include <linux/wait.h>
28*39d43904SHaijun Liu #include <linux/workqueue.h>
29*39d43904SHaijun Liu #include <linux/types.h>
30*39d43904SHaijun Liu 
31*39d43904SHaijun Liu #include "t7xx_cldma.h"
32*39d43904SHaijun Liu #include "t7xx_pci.h"
33*39d43904SHaijun Liu 
/**
 * enum cldma_id - Identifiers for CLDMA HW units.
 * @CLDMA_ID_MD: Modem control channel.
 * @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
 * @CLDMA_NUM:   Number of CLDMA HW units available. Must remain the last
 *               entry, as it doubles as the count of the IDs above it.
 */
enum cldma_id {
	CLDMA_ID_MD,
	CLDMA_ID_AP,
	CLDMA_NUM
};
45*39d43904SHaijun Liu 
/**
 * struct cldma_gpd - CLDMA General Purpose Descriptor (GPD).
 * @flags:              Descriptor control bits (see GPD_FLAGS_HWO, GPD_FLAGS_IOC).
 * @not_used1:          Reserved/padding byte.
 * @rx_data_allow_len:  Size in bytes the HW may fill into the data buffer
 *                      (meaningful for RX, per the field name).
 * @next_gpd_ptr_h:     High 32 bits of the DMA address of the next GPD.
 * @next_gpd_ptr_l:     Low 32 bits of the DMA address of the next GPD.
 * @data_buff_bd_ptr_h: High 32 bits of the data buffer DMA address.
 * @data_buff_bd_ptr_l: Low 32 bits of the data buffer DMA address.
 * @data_buff_len:      Length in bytes of the data in the buffer.
 * @not_used2:          Reserved/padding.
 *
 * In-memory descriptor shared with the CLDMA HW (allocated from
 * &struct cldma_ctrl.gpd_dmapool); multi-byte fields are little-endian as
 * seen by the device, hence the __le types. Do not reorder or resize fields.
 */
struct cldma_gpd {
	u8 flags;
	u8 not_used1;
	__le16 rx_data_allow_len;
	__le32 next_gpd_ptr_h;
	__le32 next_gpd_ptr_l;
	__le32 data_buff_bd_ptr_h;
	__le32 data_buff_bd_ptr_l;
	__le16 data_buff_len;
	__le16 not_used2;
};
57*39d43904SHaijun Liu 
/**
 * struct cldma_request - CPU-side bookkeeping for one descriptor slot.
 * @gpd:         Descriptor as seen by the CPU (virtual address).
 * @gpd_addr:    Same descriptor as seen by the device (DMA address).
 * @skb:         Socket buffer currently attached to this slot, if any.
 * @mapped_buff: DMA mapping of the data buffer associated with @skb.
 * @entry:       Node in the owning &struct cldma_ring gpd_ring list.
 */
struct cldma_request {
	struct cldma_gpd *gpd;	/* Virtual address for CPU */
	dma_addr_t gpd_addr;	/* Physical address for DMA */
	struct sk_buff *skb;
	dma_addr_t mapped_buff;
	struct list_head entry;
};
65*39d43904SHaijun Liu 
/**
 * struct cldma_ring - Descriptor ring backing one CLDMA queue.
 * @gpd_ring: Circular list of &struct cldma_request entries.
 * @length:   Number of requests in the ring.
 * @pkt_size: Packet buffer size used for this ring.
 */
struct cldma_ring {
	struct list_head gpd_ring;	/* Ring of struct cldma_request */
	unsigned int length;		/* Number of struct cldma_request */
	int pkt_size;
};
71*39d43904SHaijun Liu 
/**
 * struct cldma_queue - State of a single CLDMA TX or RX queue.
 * @md_ctrl:    Owning CLDMA controller.
 * @dir:        Transfer direction (TX or RX).
 * @index:      Queue number within the controller.
 * @tr_ring:    Descriptor ring backing this queue.
 * @tr_done:    Ring cursor: next request expected to complete.
 * @rx_refill:  Ring cursor: next RX request to re-arm with a fresh buffer.
 * @tx_next:    Ring cursor: next TX request to hand to the HW.
 * @budget:     Remaining free slots; same as ring buffer size by default.
 * @ring_lock:  Protects the ring cursors and @budget.
 * @req_wq:     Wait queue for callers needing a free slot; only used for TX.
 * @worker:     Workqueue servicing this queue.
 * @cldma_work: Work item queued on @worker.
 *
 * NOTE(review): the exact roles of the three ring cursors are inferred from
 * their names; confirm against the implementation in t7xx_hif_cldma.c.
 */
struct cldma_queue {
	struct cldma_ctrl *md_ctrl;
	enum mtk_txrx dir;
	unsigned int index;
	struct cldma_ring *tr_ring;
	struct cldma_request *tr_done;
	struct cldma_request *rx_refill;
	struct cldma_request *tx_next;
	int budget;			/* Same as ring buffer size by default */
	spinlock_t ring_lock;
	wait_queue_head_t req_wq;	/* Only for TX */
	struct workqueue_struct *worker;
	struct work_struct cldma_work;
};
86*39d43904SHaijun Liu 
/**
 * struct cldma_ctrl - Top-level state for one CLDMA HW unit.
 * @hif_id:       Which CLDMA HW unit this controller drives.
 * @dev:          Device used for DMA mappings and logging.
 * @t7xx_dev:     Owning t7xx PCIe device.
 * @txq:          Per-queue TX state.
 * @rxq:          Per-queue RX state.
 * @txq_active:   TX queue activity flags (presumably one bit per queue —
 *                confirm against the implementation).
 * @rxq_active:   RX queue activity flags (same assumption as @txq_active).
 * @txq_started:  TX queue started flags (same assumption as @txq_active).
 * @cldma_lock:   Protects CLDMA structure.
 * @gpd_dmapool:  DMA pool the descriptors are allocated from.
 * @tx_ring:      Descriptor rings for the TX queues.
 * @rx_ring:      Descriptor rings for the RX queues.
 * @hw_info:      CLDMA HW register/interface description.
 * @is_late_init: True once late (post-boot) initialization has run.
 * @recv_skb:     Callback invoked to deliver a received skb upward; set via
 *                t7xx_cldma_set_recv_skb().
 */
struct cldma_ctrl {
	enum cldma_id hif_id;
	struct device *dev;
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_queue txq[CLDMA_TXQ_NUM];
	struct cldma_queue rxq[CLDMA_RXQ_NUM];
	unsigned short txq_active;
	unsigned short rxq_active;
	unsigned short txq_started;
	spinlock_t cldma_lock; /* Protects CLDMA structure */
	/* Assumes T/R GPD/BD/SPD have the same size */
	struct dma_pool *gpd_dmapool;
	struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
	struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
	struct t7xx_cldma_hw hw_info;
	bool is_late_init;
	int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
105*39d43904SHaijun Liu 
/* Bits of struct cldma_gpd.flags. HWO presumably marks HW ownership of the
 * descriptor and IOC requests an interrupt on completion — confirm against
 * the CLDMA HW documentation / t7xx_hif_cldma.c usage.
 */
#define GPD_FLAGS_HWO		BIT(0)
#define GPD_FLAGS_IOC		BIT(7)
/* Alignment of descriptors allocated from gpd_dmapool */
#define GPD_DMAPOOL_ALIGN	16

#define CLDMA_MTU		3584	/* 3.5 KiB */
111*39d43904SHaijun Liu 
/* Allocate the controller instance for @hif_id on @t7xx_dev. */
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
/* Program the CLDMA HW registers for this controller. */
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
/* One-time SW initialization of the controller; returns 0 or -errno. */
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
/* Tear down the controller and release its resources. */
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
/* Switch the controller configuration (stage-dependent — see the .c file). */
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
/* Install the RX delivery callback stored in cldma_ctrl.recv_skb. */
void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
/* Queue @skb for transmission on TX queue @qno; returns 0 or -errno. */
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
/* Stop all queues in the given direction. */
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
/* Clear/reset all queues in the given direction. */
void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
125*39d43904SHaijun Liu 
126*39d43904SHaijun Liu #endif /* __T7XX_HIF_CLDMA_H__ */
127