// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"

int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
		     struct enetc_cbdr *cbdr)
{
	int size = bd_count * sizeof(struct enetc_cbd);

	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
					   GFP_KERNEL);
	if (!cbdr->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
		dma_free_coherent(dev, size, cbdr->bd_base,
				  cbdr->bd_dma_base);
		return -EINVAL;
	}

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;
	cbdr->dma_dev = dev;
	cbdr->bd_count = bd_count;

	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
	cbdr->mr = hw->reg + ENETC_SICBDRMR;

	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

	enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
	enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
	/* enable ring */
	enetc_wr_reg(cbdr->mr, BIT(31));

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_setup_cbdr);

void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	/* disable ring */
	enetc_wr_reg(cbdr->mr, 0);

	dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
			  cbdr->bd_dma_base);
	cbdr->bd_base = NULL;
	cbdr->dma_dev = NULL;
}
EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);

int enetc4_setup_cbdr(struct enetc_si *si)
{
	struct ntmp_user *user = &si->ntmp_user;
	struct device *dev = &si->pdev->dev;
	struct enetc_hw *hw = &si->hw;
	struct netc_cbdr_regs regs;

	user->cbdr_num = 1;
	user->dev = dev;
	user->ring = devm_kcalloc(dev, user->cbdr_num,
				  sizeof(struct netc_cbdr), GFP_KERNEL);
	if (!user->ring)
		return -ENOMEM;

	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	regs.pir = hw->reg + ENETC_SICBDRPIR;
	regs.cir = hw->reg + ENETC_SICBDRCIR;
	regs.mr = hw->reg + ENETC_SICBDRMR;
	regs.bar0 = hw->reg + ENETC_SICBDRBAR0;
	regs.bar1 = hw->reg + ENETC_SICBDRBAR1;
	regs.lenr = hw->reg + ENETC_SICBDRLENR;

	return ntmp_init_cbdr(user->ring, dev, &regs);
}
EXPORT_SYMBOL_GPL(enetc4_setup_cbdr);

void enetc4_teardown_cbdr(struct enetc_si *si)
{
	struct ntmp_user *user = &si->ntmp_user;

	ntmp_free_cbdr(user->ring);
	user->dev = NULL;
}
EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr);

/* Advance the ring's next_to_clean up to the h/w consumer index,
 * warning about any completed BD whose status field reports an error.
 */
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
	struct enetc_cbd *dest_cbd;
	int i, status;

	i = ring->next_to_clean;

	while (enetc_rd_reg(ring->cir) != i) {
		dest_cbd = ENETC_CBD(*ring, i);
		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
		if (status)
			dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
				 status, dest_cbd->cmd);

		memset(dest_cbd, 0, sizeof(*dest_cbd));

		i = (i + 1) % ring->bd_count;
	}

	ring->next_to_clean = i;
}

/* number of BDs the s/w producer may still use before the ring is full */
static int enetc_cbd_unused(struct enetc_cbdr *r)
{
	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
		r->bd_count;
}
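
/*
 * Worked example of the free-slot arithmetic above (illustrative only):
 * with bd_count = 8, next_to_clean = 2 and next_to_use = 6, the ring has
 * (2 - 6 - 1 + 8) % 8 = 3 BDs free. The "- 1" keeps one slot permanently
 * unused so that a full ring (producer one behind consumer) can be told
 * apart from an empty one (producer == consumer).
 */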

int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	int timeout = ENETC_CBDR_TIMEOUT;
	struct enetc_cbd *dest_cbd;
	int i;

	if (unlikely(!ring->bd_base))
		return -EIO;

	if (unlikely(!enetc_cbd_unused(ring)))
		enetc_clean_cbdr(ring);

	i = ring->next_to_use;
	dest_cbd = ENETC_CBD(*ring, i);

	/* copy command to the ring */
	*dest_cbd = *cbd;
	i = (i + 1) % ring->bd_count;

	ring->next_to_use = i;
	/* let H/W know BD ring has been updated */
	enetc_wr_reg(ring->pir, i);

	do {
		if (enetc_rd_reg(ring->cir) == i)
			break;
		udelay(10); /* cannot sleep, rtnl_lock() */
		timeout -= 10;
	} while (timeout);

	if (!timeout)
		return -EBUSY;

	/* the h/w may have written data back to the CBD; propagate it to
	 * the caller
	 */
	*cbd = *dest_cbd;

	enetc_clean_cbdr(ring);

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_send_cmd);

int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
	struct enetc_cbd cbd;

	memset(&cbd, 0, sizeof(cbd));

	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);

	return enetc_send_cmd(si, &cbd);
}
EXPORT_SYMBOL_GPL(enetc_clear_mac_flt_entry);

int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map)
{
	struct enetc_cbd cbd;
	u32 upper;
	u16 lower;

	memset(&cbd, 0, sizeof(cbd));

	/* fill up the "set" descriptor */
	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(si_map);
	/* enable entry */
	cbd.opt[0] = cpu_to_le32(BIT(31));

	upper = *(const u32 *)mac_addr;
	lower = *(const u16 *)(mac_addr + 4);
	cbd.addr[0] = cpu_to_le32(upper);
	cbd.addr[1] = cpu_to_le32(lower);

	return enetc_send_cmd(si, &cbd);
}
EXPORT_SYMBOL_GPL(enetc_set_mac_flt_entry);
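
/*
 * Illustrative sketch, not part of the driver: a caller that owns the
 * station interface could pair the two helpers above to install and
 * later drop an exact-match filter. The address, table index and SI
 * bitmap below are made-up example values.
 *
 *	char addr[ETH_ALEN] = { 0x00, 0x04, 0x1f, 0x05, 0x00, 0x01 };
 *	int err;
 *
 *	err = enetc_set_mac_flt_entry(si, 0, addr, BIT(0));
 *	if (!err)
 *		err = enetc_clear_mac_flt_entry(si, 0);
 */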

/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	void *tmp, *tmp_align;
	dma_addr_t dma;
	int err;

	/* fill up the "set" descriptor */
	cbd.cmd = 0;
	cbd.cls = 4;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(0); /* SI */

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
				       &dma, &tmp_align);
	if (!tmp)
		return -ENOMEM;

	memcpy(tmp_align, rfse, sizeof(*rfse));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "FS entry add failed (%d)!\n", err);

	enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);

	return err;
}
EXPORT_SYMBOL_GPL(enetc_set_fs_entry);

static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
			       bool read)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	u8 *tmp, *tmp_align;
	dma_addr_t dma;
	int err, i;

	if (count < ENETC_CBD_DATA_MEM_ALIGN)
		/* HW only takes in a full 64 entry table */
		return -EINVAL;

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
				       &dma, (void *)&tmp_align);
	if (!tmp)
		return -ENOMEM;

	if (!read)
		for (i = 0; i < count; i++)
			tmp_align[i] = (u8)(table[i]);

	/* fill up the descriptor */
	cbd.cmd = read ? 2 : 1;
	cbd.cls = 3;

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "RSS cmd failed (%d)!\n", err);

	if (read)
		for (i = 0; i < count; i++)
			table[i] = tmp_align[i];

	enetc_cbd_free_data_mem(si, count, tmp, &dma);

	return err;
}

/* Get RSS table */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return enetc_cmd_rss_table(si, table, count, true);
}
EXPORT_SYMBOL_GPL(enetc_get_rss_table);

/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_table);

int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return ntmp_rsst_query_entry(&si->ntmp_user, table, count);
}
EXPORT_SYMBOL_GPL(enetc4_get_rss_table);

int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return ntmp_rsst_update_entry(&si->ntmp_user, table, count);
}
EXPORT_SYMBOL_GPL(enetc4_set_rss_table);
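
/*
 * Illustrative sketch, not part of the driver: spreading Rx flows
 * round-robin over four rings by rewriting the 64-entry indirection
 * table (the h/w only accepts a full table, see enetc_cmd_rss_table()).
 * The ring count of four is an assumption made for the example.
 *
 *	u32 table[64];
 *	int i, err;
 *
 *	for (i = 0; i < ARRAY_SIZE(table); i++)
 *		table[i] = i % 4;
 *
 *	err = enetc_set_rss_table(si, table, ARRAY_SIZE(table));
 */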