/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#ifndef __RTW_HCI_H__
#define __RTW_HCI_H__

/* ops for PCI, USB and SDIO */
struct rtw_hci_ops {
	int (*tx_write)(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb);
	void (*tx_kick_off)(struct rtw_dev *rtwdev);
	void (*flush_queues)(struct rtw_dev *rtwdev, u32 queues, bool drop);
	int (*setup)(struct rtw_dev *rtwdev);
	int (*start)(struct rtw_dev *rtwdev);
	void (*stop)(struct rtw_dev *rtwdev);
	void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
	void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
	void (*interface_cfg)(struct rtw_dev *rtwdev);
	void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable);

	int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
	int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);

	u8 (*read8)(struct rtw_dev *rtwdev, u32 addr);
	u16 (*read16)(struct rtw_dev *rtwdev, u32 addr);
	u32 (*read32)(struct rtw_dev *rtwdev, u32 addr);
	void (*write8)(struct rtw_dev *rtwdev, u32 addr, u8 val);
	void (*write16)(struct rtw_dev *rtwdev, u32 addr, u16 val);
	void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
};

static inline int rtw_hci_tx_write(struct rtw_dev *rtwdev,
				   struct rtw_tx_pkt_info *pkt_info,
				   struct sk_buff *skb)
{
	return rtwdev->hci.ops->tx_write(rtwdev, pkt_info, skb);
}

static inline void rtw_hci_tx_kick_off(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.ops->tx_kick_off(rtwdev);
}

static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.ops->setup(rtwdev);
}

static inline int rtw_hci_start(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.ops->start(rtwdev);
}

static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
{
	rtwdev->hci.ops->stop(rtwdev);
}

static inline void rtw_hci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	rtwdev->hci.ops->deep_ps(rtwdev, enter);
}

static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	rtwdev->hci.ops->link_ps(rtwdev, enter);
}

static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
{
	rtwdev->hci.ops->interface_cfg(rtwdev);
}

static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
{
	if (rtwdev->hci.ops->dynamic_rx_agg)
		rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable);
}

static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	return rtwdev->hci.ops->write_data_rsvd_page(rtwdev, buf, size);
}

static inline int
rtw_hci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	return rtwdev->hci.ops->write_data_h2c(rtwdev, buf, size);
}

static inline u8 rtw_read8(struct rtw_dev *rtwdev, u32 addr)
{
	return rtwdev->hci.ops->read8(rtwdev, addr);
}

static inline u16 rtw_read16(struct rtw_dev *rtwdev, u32 addr)
{
	return rtwdev->hci.ops->read16(rtwdev, addr);
}

static inline u32 rtw_read32(struct rtw_dev *rtwdev, u32 addr)
{
	return rtwdev->hci.ops->read32(rtwdev, addr);
}

static inline void rtw_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	rtwdev->hci.ops->write8(rtwdev, addr, val);
}

static inline void rtw_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	rtwdev->hci.ops->write16(rtwdev, addr, val);
}

static inline void rtw_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	rtwdev->hci.ops->write32(rtwdev, addr, val);
}
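
/*
 * Editorial note: the helpers below build on the plain rtw_read*()/
 * rtw_write*() accessors above.  The *_set()/*_clr() variants OR in or clear
 * the given bits, and the *_mask() variants shift data into (or out of) the
 * field selected by a contiguous bitmask.  They are plain read-modify-write
 * sequences with no internal locking.
 *
 * Illustrative use only (REG_EXAMPLE is a hypothetical register name, not
 * defined by this driver):
 *
 *	val = rtw_read32_mask(rtwdev, REG_EXAMPLE, GENMASK(15, 8));
 *	rtw_write32_mask(rtwdev, REG_EXAMPLE, GENMASK(15, 8), val + 1);
 */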

static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
	u8 val;

	val = rtw_read8(rtwdev, addr);
	rtw_write8(rtwdev, addr, val | bit);
}

static inline void rtw_write16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
	u16 val;

	val = rtw_read16(rtwdev, addr);
	rtw_write16(rtwdev, addr, val | bit);
}

static inline void rtw_write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
	u32 val;

	val = rtw_read32(rtwdev, addr);
	rtw_write32(rtwdev, addr, val | bit);
}

static inline void rtw_write8_clr(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
	u8 val;

	val = rtw_read8(rtwdev, addr);
	rtw_write8(rtwdev, addr, val & ~bit);
}

static inline void rtw_write16_clr(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
	u16 val;

	val = rtw_read16(rtwdev, addr);
	rtw_write16(rtwdev, addr, val & ~bit);
}

static inline void rtw_write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
	u32 val;

	val = rtw_read32(rtwdev, addr);
	rtw_write32(rtwdev, addr, val & ~bit);
}

static inline u32
rtw_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
	    u32 addr, u32 mask)
{
	u32 val;

	lockdep_assert_held(&rtwdev->mutex);

	val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask);

	return val;
}

static inline void
rtw_write_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
	     u32 addr, u32 mask, u32 data)
{
	lockdep_assert_held(&rtwdev->mutex);

	rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data);
}

static inline u32
rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 ret;

	orig = rtw_read32(rtwdev, addr);
	ret = (orig & mask) >> shift;

	return ret;
}

static inline u16
rtw_read16_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 ret;

	orig = rtw_read16(rtwdev, addr);
	ret = (orig & mask) >> shift;

	return ret;
}

static inline u8
rtw_read8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 ret;

	orig = rtw_read8(rtwdev, addr);
	ret = (orig & mask) >> shift;

	return ret;
}

static inline void
rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 set;

	WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr);

	orig = rtw_read32(rtwdev, addr);
	set = (orig & ~mask) | ((data << shift) & mask);
	rtw_write32(rtwdev, addr, set);
}

static inline void
rtw_write8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u8 data)
{
	u32 shift;
	u8 orig, set;

	mask &= 0xff;
	shift = __ffs(mask);

	orig = rtw_read8(rtwdev, addr);
	set = (orig & ~mask) | ((data << shift) & mask);
	rtw_write8(rtwdev, addr, set);
}

static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.type;
}
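
/*
 * Editorial note: flush_queues is an optional op.  Bus backends that do not
 * implement it leave the pointer NULL, and the wrappers below then silently
 * do nothing.  The "queues" argument is a bitmap of queue indices, so
 * BIT(rtwdev->hw->queues) - 1 covers every queue registered with mac80211.
 */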

static inline void rtw_hci_flush_queues(struct rtw_dev *rtwdev, u32 queues,
					bool drop)
{
	if (rtwdev->hci.ops->flush_queues)
		rtwdev->hci.ops->flush_queues(rtwdev, queues, drop);
}

static inline void rtw_hci_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
{
	if (rtwdev->hci.ops->flush_queues)
		rtwdev->hci.ops->flush_queues(rtwdev,
					      BIT(rtwdev->hw->queues) - 1,
					      drop);
}

#endif