/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 */

#ifndef __T7XX_HIF_CLDMA_H__
#define __T7XX_HIF_CLDMA_H__

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/types.h>

#include "t7xx_cldma.h"
#include "t7xx_pci.h"

#define CLDMA_JUMBO_BUFF_SZ		(63 * 1024 + sizeof(struct ccci_header))
#define CLDMA_SHARED_Q_BUFF_SZ		3584
#define CLDMA_DEDICATED_Q_BUFF_SZ	2048

/**
 * enum cldma_id - Identifiers for CLDMA HW units.
 * @CLDMA_ID_MD: Modem control channel.
 * @CLDMA_ID_AP: Application Processor control channel.
 * @CLDMA_NUM: Number of CLDMA HW units available.
 */
enum cldma_id {
	CLDMA_ID_MD,
	CLDMA_ID_AP,
	CLDMA_NUM
};
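
/*
 * Illustration only, not part of this header: callers that drive both HW
 * units typically keep one control structure per unit and index it with
 * enum cldma_id. The array and helper below are assumptions made for the
 * sketch, not declarations provided here.
 *
 *	struct cldma_ctrl *md_ctrl[CLDMA_NUM];
 *	enum cldma_id id;
 *
 *	for (id = CLDMA_ID_MD; id < CLDMA_NUM; id++)
 *		setup_one_cldma_unit(md_ctrl[id]);	(hypothetical helper)
 */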

struct cldma_gpd {
	u8 flags;
	u8 not_used1;
	__le16 rx_data_allow_len;
	__le32 next_gpd_ptr_h;
	__le32 next_gpd_ptr_l;
	__le32 data_buff_bd_ptr_h;
	__le32 data_buff_bd_ptr_l;
	__le16 data_buff_len;
	__le16 not_used2;
};

enum cldma_cfg {
	CLDMA_SHARED_Q_CFG,
	CLDMA_DEDICATED_Q_CFG,
};

struct cldma_request {
	struct cldma_gpd *gpd;	/* Virtual address for CPU */
	dma_addr_t gpd_addr;	/* Physical address for DMA */
	struct sk_buff *skb;
	dma_addr_t mapped_buff;
	struct list_head entry;
};

struct cldma_ring {
	struct list_head gpd_ring;	/* Ring of struct cldma_request */
	unsigned int length;		/* Number of struct cldma_request */
	int pkt_size;
};

struct cldma_queue {
	struct cldma_ctrl *md_ctrl;
	enum mtk_txrx dir;
	unsigned int index;
	struct cldma_ring *tr_ring;
	struct cldma_request *tr_done;
	struct cldma_request *rx_refill;
	struct cldma_request *tx_next;
	int budget;			/* Same as ring buffer size by default */
	spinlock_t ring_lock;
	wait_queue_head_t req_wq;	/* Only for TX */
	struct workqueue_struct *worker;
	struct work_struct cldma_work;
	int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};

struct cldma_ctrl {
	enum cldma_id hif_id;
	struct device *dev;
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_queue txq[CLDMA_TXQ_NUM];
	struct cldma_queue rxq[CLDMA_RXQ_NUM];
	unsigned short txq_active;
	unsigned short rxq_active;
	unsigned short txq_started;
	spinlock_t cldma_lock;	/* Protects CLDMA structure */
	/* Assumes T/R GPD/BD/SPD have the same size */
	struct dma_pool *gpd_dmapool;
	struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
	struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
	struct md_pm_entity *pm_entity;
	struct t7xx_cldma_hw hw_info;
	bool is_late_init;
};

#define CLDMA_Q_IDX_DUMP	1
#define GPD_FLAGS_HWO		BIT(0)
#define GPD_FLAGS_IOC		BIT(7)
#define GPD_DMAPOOL_ALIGN	16
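
/*
 * Minimal sketch of how a general purpose descriptor (GPD) might be filled
 * before being handed to the DMA engine. This is an assumption made for
 * illustration, not the driver's canonical code path: the 64-bit DMA
 * addresses are split into the _h/_l halves, and the HW-owned flag is set
 * last so the hardware never sees a half-initialized descriptor.
 *
 *	static void cldma_gpd_fill_sketch(struct cldma_gpd *gpd, dma_addr_t buff,
 *					  dma_addr_t next_gpd, u16 len)
 *	{
 *		gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(buff));
 *		gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(buff));
 *		gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_gpd));
 *		gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_gpd));
 *		gpd->data_buff_len = cpu_to_le16(len);
 *		gpd->flags = GPD_FLAGS_IOC;
 *		gpd->flags |= GPD_FLAGS_HWO;
 *	}
 */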

int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);

#endif /* __T7XX_HIF_CLDMA_H__ */
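
/*
 * Usage sketch, kept outside the include guard as a comment and for
 * illustration only: how a caller might bring one CLDMA instance up, attach
 * an RX handler to a queue, and transmit an skb. The wrapper functions,
 * queue numbers and configuration choice are assumptions for the example;
 * only the t7xx_cldma_*() calls and types come from this header.
 *
 *	static int example_rx(struct cldma_queue *queue, struct sk_buff *skb)
 *	{
 *		(a real handler would pass the skb up the stack)
 *		dev_kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static int example_bringup(struct cldma_ctrl *md_ctrl, struct sk_buff *skb)
 *	{
 *		int ret;
 *
 *		ret = t7xx_cldma_init(md_ctrl);
 *		if (ret)
 *			return ret;
 *
 *		t7xx_cldma_switch_cfg(md_ctrl, CLDMA_SHARED_Q_CFG);
 *		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[0], example_rx);
 *		t7xx_cldma_start(md_ctrl);
 *
 *		return t7xx_cldma_send_skb(md_ctrl, 0, skb);
 *	}
 */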