// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

/* Extraction status/control words, seen in-band in the XTR data stream */
#define XTR_EOF_0     ntohl((__force __be32)0x80000000u)
#define XTR_EOF_1     ntohl((__force __be32)0x80000001u)
#define XTR_EOF_2     ntohl((__force __be32)0x80000002u)
#define XTR_EOF_3     ntohl((__force __be32)0x80000003u)
#define XTR_PRUNED    ntohl((__force __be32)0x80000004u)
#define XTR_ABORT     ntohl((__force __be32)0x80000005u)
#define XTR_ESCAPE    ntohl((__force __be32)0x80000006u)
#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)

#define XTR_VALID_BYTES(x)	(4 - ((x) & 3))

#define INJ_TIMEOUT_NS 50000

void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
{
	/* Start flush */
	spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);

	/* Allow to drain */
	mdelay(1);

	/* All Queues normal */
	spx5_wr(0, sparx5, QS_XTR_FLUSH);
}

void sparx5_ifh_parse(struct sparx5 *sparx5, u32 *ifh, struct frame_info *info)
{
	u8 *xtr_hdr = (u8 *)ifh;

	/* FWD is bit 45-72 (28 bits), but we only read the 27 LSB for now */
	u32 fwd =
		((u32)xtr_hdr[27] << 24) |
		((u32)xtr_hdr[28] << 16) |
		((u32)xtr_hdr[29] <<  8) |
		((u32)xtr_hdr[30] <<  0);
	fwd = (fwd >> 5);
	info->src_port = spx5_field_get(GENMASK(is_sparx5(sparx5) ? 7 : 6, 1),
					fwd);

	/*
	 * Bits 270-271 are occasionally unexpectedly set by the hardware;
	 * clear them before extracting the timestamp
	 */
	info->timestamp =
		((u64)(xtr_hdr[2] & GENMASK(5, 0)) << 24) |
		((u64)xtr_hdr[3] << 16) |
		((u64)xtr_hdr[4] <<  8) |
		((u64)xtr_hdr[5] <<  0);
}
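/* Worked example (illustrative only, not used by the driver): with header
 * bytes xtr_hdr[27..30] = { 0x00, 0x00, 0x01, 0x40 }, the shifts above
 * accumulate fwd = 0x00000140; after the ">> 5" alignment fwd = 0x0000000a,
 * and on Sparx5 extracting GENMASK(7, 1) then shifting right by one yields
 * src_port = 5.
 */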
static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
{
	bool eof_flag = false, pruned_flag = false, abort_flag = false;
	struct net_device *netdev;
	struct sparx5_port *port;
	struct frame_info fi;
	int i, byte_cnt = 0;
	struct sk_buff *skb;
	u32 ifh[IFH_LEN];
	u32 *rxbuf;

	/* Get IFH */
	for (i = 0; i < IFH_LEN; i++)
		ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));

	/* Decode IFH (what's needed) */
	sparx5_ifh_parse(sparx5, ifh, &fi);

	/* Map to port netdev */
	port = fi.src_port < sparx5->data->consts->n_ports ?
		       sparx5->ports[fi.src_port] : NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, grp);
		return;
	}

	/* Have netdev, get skb */
	netdev = port->ndev;
	skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
	if (!skb) {
		sparx5_xtr_flush(sparx5, grp);
		dev_err(sparx5->dev, "No skb allocated\n");
		netdev->stats.rx_dropped++;
		return;
	}
	rxbuf = (u32 *)skb->data;

	/* Now, pull frame data */
	while (!eof_flag) {
		u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
		u32 cmp = val;

		if (byte_swap)
			cmp = ntohl((__force __be32)val);

		switch (cmp) {
		case XTR_NOT_READY:
			break;
		case XTR_ABORT:
			/* No accompanying data */
			abort_flag = true;
			eof_flag = true;
			break;
		case XTR_EOF_0:
		case XTR_EOF_1:
		case XTR_EOF_2:
		case XTR_EOF_3:
			/* This assumes STATUS_WORD_POS == 1, Status
			 * just after last data
			 */
			if (!byte_swap)
				val = ntohl((__force __be32)val);
			byte_cnt -= (4 - XTR_VALID_BYTES(val));
			eof_flag = true;
			break;
		case XTR_PRUNED:
			/* But get the last 4 bytes as well */
			eof_flag = true;
			pruned_flag = true;
			fallthrough;
		case XTR_ESCAPE:
			*rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
			byte_cnt += 4;
			rxbuf++;
			break;
		default:
			*rxbuf = val;
			byte_cnt += 4;
			rxbuf++;
		}
	}

	if (abort_flag || pruned_flag || !eof_flag) {
		netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
			   abort_flag, pruned_flag, eof_flag);
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return;
	}

	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	/* Finish up skb */
	skb_put(skb, byte_cnt - ETH_FCS_LEN);
	eth_skb_pad(skb);
	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, netdev);
	netdev->stats.rx_bytes += skb->len;
	netdev->stats.rx_packets++;
	netif_rx(skb);
}
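/* Status word arithmetic, as a worked example (illustrative only): for an
 * XTR_EOF_1 status word, val is 0x80000001 once it has been normalized by
 * the ntohl() above, so (val & 3) == 1, XTR_VALID_BYTES(val) is 3, and
 * byte_cnt is reduced by 4 - 3 = 1; only three of the four bytes in the
 * last data word belonged to the frame.
 */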
static int sparx5_inject(struct sparx5 *sparx5,
			 u32 *ifh,
			 struct sk_buff *skb,
			 struct net_device *ndev)
{
	int grp = INJ_QUEUE;
	u32 val, w, count;
	u8 *buf;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
		pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
				   QS_INJ_STATUS_FIFO_RDY_GET(val));
		return -EBUSY;
	}

	/* Indicate SOF */
	spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
		QS_INJ_CTRL_GAP_SIZE_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Write the IFH to the chip */
	for (w = 0; w < IFH_LEN; w++)
		spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));

	/* Write words, round up */
	count = DIV_ROUND_UP(skb->len, 4);
	buf = skb->data;
	for (w = 0; w < count; w++, buf += 4) {
		val = get_unaligned((const u32 *)buf);
		spx5_wr(val, sparx5, QS_INJ_WR(grp));
	}

	/* Add padding */
	while (w < (60 / 4)) {
		spx5_wr(0, sparx5, QS_INJ_WR(grp));
		w++;
	}

	/* Indicate EOF and valid bytes in last word */
	spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
		QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
		QS_INJ_CTRL_EOF_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Add dummy CRC */
	spx5_wr(0, sparx5, QS_INJ_WR(grp));
	w++;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		struct sparx5_port *port = netdev_priv(ndev);

		pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
				   QS_INJ_STATUS_WMARK_REACHED_GET(val));
		netif_stop_queue(ndev);
		hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
			      HRTIMER_MODE_REL);
	}

	return NETDEV_TX_OK;
}

netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u32 ifh[IFH_LEN];
	netdev_tx_t ret;

	memset(ifh, 0, IFH_LEN * 4);
	sparx5_set_port_ifh(sparx5, ifh, port->portno);

	if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		if (sparx5_ptp_txtstamp_request(port, skb) < 0)
			return NETDEV_TX_BUSY;

		sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
		sparx5_set_port_ifh_pdu_type(sparx5, ifh,
					     SPARX5_SKB_CB(skb)->pdu_type);
		sparx5_set_port_ifh_pdu_w16_offset(sparx5, ifh,
						   SPARX5_SKB_CB(skb)->pdu_w16_offset);
		sparx5_set_port_ifh_timestamp(sparx5, ifh,
					      SPARX5_SKB_CB(skb)->ts_id);
	}

	skb_tx_timestamp(skb);
	spin_lock(&sparx5->tx_lock);
	if (sparx5->fdma_irq > 0)
		ret = sparx5_fdma_xmit(sparx5, ifh, skb);
	else
		ret = sparx5_inject(sparx5, ifh, skb, dev);
	spin_unlock(&sparx5->tx_lock);

	if (ret == -EBUSY)
		goto busy;
	if (ret < 0)
		goto drop;

	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	sparx5->tx.packets++;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		return NETDEV_TX_OK;

	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;
drop:
	stats->tx_dropped++;
	sparx5->tx.dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
busy:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		sparx5_ptp_txtstamp_release(port, skb);
	return NETDEV_TX_BUSY;
}

static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
{
	struct sparx5_port *port = container_of(tmr, struct sparx5_port,
						inj_timer);
	int grp = INJ_QUEUE;
	u32 val;

	val = spx5_rd(port->sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		pr_err_ratelimited("Injection: Reset watermark count\n");
		/* Reset Watermark count to restart */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 port->sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));
	}
	netif_wake_queue(port->ndev);
	return HRTIMER_NORESTART;
}
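/* Injection framing, as a worked arithmetic example (illustrative only):
 * a 14-byte frame is written as DIV_ROUND_UP(14, 4) = 4 data words, the
 * padding loop then writes 11 zero words to reach the 60-byte minimum,
 * and VLD_BYTES is 0 because skb->len < 60. A 65-byte frame needs no
 * padding (17 words already exceed 60 / 4 = 15 words) and gets
 * VLD_BYTES = 65 % 4 = 1, marking one valid byte in the final data word.
 */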
int sparx5_manual_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;

	/* Change mode to manual extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
	     portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1);
	     portno++) {
		/* ASM CPU port: No preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
			sparx5, ASM_PORT_CFG(portno));

		/* Reset WM cnt to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable Disassembler buffer underrun watchdog */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));
	}
	return 0;
}

irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
{
	struct sparx5 *s5 = _sparx5;
	int poll = 64;

	/* Check data in queue */
	while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
		sparx5_xtr_grp(s5, XTR_QUEUE, false);

	return IRQ_HANDLED;
}

void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
	hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->inj_timer.function = sparx5_injection_timeout;
}
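/* Hookup sketch (illustrative only; the real wiring lives in the probe
 * path, and the "xtr_irq" field and IRQ name below are assumptions, not
 * part of this file):
 *
 *	if (sparx5_manual_injection_mode(sparx5) == 0)
 *		err = devm_request_irq(sparx5->dev, sparx5->xtr_irq,
 *				       sparx5_xtr_handler, 0,
 *				       "sparx5-xtr", sparx5);
 *	sparx5_port_inj_timer_setup(port);	(once per port)
 */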