/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.etx.error_summary)) {
		CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
		if (unlikely(p->des01.etx.jabber_timeout)) {
			CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
			x->tx_jabber++;
		}

		if (unlikely(p->des01.etx.frame_flushed)) {
			CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(p->des01.etx.loss_carrier)) {
			CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.no_carrier)) {
			CHIP_DBG(KERN_ERR "\tno_carrier error\n");
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.late_collision)) {
			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_collisions)) {
			CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_deferral)) {
			CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
			x->tx_deferred++;
		}

		if (unlikely(p->des01.etx.underflow_error)) {
			CHIP_DBG(KERN_ERR "\tunderflow error\n");
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(p->des01.etx.ip_header_error)) {
			CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
			x->tx_ip_header_error++;
		}

		if (unlikely(p->des01.etx.payload_error)) {
			CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = -1;
	}

	if (unlikely(p->des01.etx.deferred)) {
		CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
		x->tx_deferred++;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.etx.vlan_frame) {
		CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
		x->tx_vlan++;
	}
#endif

	return ret;
}

static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return p->des01.etx.buffer1_size;
}

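/* Decode the checksum-offload status carried in RDES0. The three COE bits
 * (frame type, IPC checksum error, payload checksum error) are packed into
 * a 3-bit value and looked up in the table below. Worked example: type = 1,
 * ipc_err = 1, payload_err = 0 gives status = 0x6 (IP header error), which
 * is returned as csum_none so the stack re-verifies the checksum in SW.
 */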
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bit5 bit7 bit0 | Frame status
	 * ----------------------------------------------------------
	 *    0    0    0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *    1    0    0 | IPv4/6 no checksum error
	 *    1    0    1 | IPv4/6 payload checksum error
	 *    1    1    0 | IPv4/6 IP header checksum error
	 *    1    1    1 | IPv4/6 IP header and payload checksum errors
	 *    0    0    1 | IPv4/6 unsupported IP payload
	 *    0    1    1 | COE bypassed: not an IPv4/6 frame
	 *    0    1    0 | Reserved
	 */
	if (status == 0x0) {
		CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
		ret = llc_snap;
	} else if (status == 0x4) {
		CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM error.\n");
		ret = good_frame;
	} else if (status == 0x5) {
		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x6) {
		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
		ret = csum_none;
	} else if (status == 0x7) {
		CHIP_DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 Header and Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x1) {
		CHIP_DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
		ret = discard_frame;
	} else if (status == 0x3) {
		CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
		ret = discard_frame;
	}
	return ret;
}

static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
			 p->des01.erx);
		if (unlikely(p->des01.erx.descriptor_error)) {
			CHIP_DBG(KERN_ERR "\tdescriptor error\n");
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error)) {
			CHIP_DBG(KERN_ERR "\toverflow error\n");
			x->rx_gmac_overflow++;
		}

		if (unlikely(p->des01.erx.ipc_csum_error))
			CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision)) {
			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions++;
		}
		if (unlikely(p->des01.erx.receive_watchdog)) {
			CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
			x->rx_watchdog++;
		}
		if (unlikely(p->des01.erx.error_gmii)) {
			CHIP_DBG(KERN_ERR "\tReceive Error\n");
			x->rx_mii++;
		}
		if (unlikely(p->des01.erx.crc_error)) {
			CHIP_DBG(KERN_ERR "\tCRC error\n");
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This does not match the information reported in the databook.
	 * In any case we need to check whether the HW csum computation
	 * succeeded and report that to the upper layers.
	 */
	ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
				 p->des01.erx.frame_type,
				 p->des01.erx.payload_csum_error);

	if (unlikely(p->des01.erx.dribbling)) {
		CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
		x->dribbling_bit++;
	}
	if (unlikely(p->des01.erx.sa_filter_fail)) {
		CHIP_DBG(KERN_ERR "GMAC RX: Source Address filter fail\n");
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		CHIP_DBG(KERN_ERR "GMAC RX: Dest Address filter fail\n");
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag) {
		CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
		x->rx_vlan++;
	}
#endif
	return ret;
}

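/* Descriptor ring initialisation: every RX descriptor is handed straight to
 * the DMA (own = 1) with the largest buffer size an enhanced descriptor can
 * express, whereas TX descriptors start CPU-owned (own = 0) until a frame is
 * queued. The last entry is flagged so the engine wraps back to the head of
 * the ring, or follows the chain pointer, via the helpers in descs_com.h.
 */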
static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
				  int disable_rx_ic)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.erx.own = 1;
		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;

		ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1));

		if (disable_rx_ic)
			p->des01.erx.disable_ic = 1;
		p++;
	}
}

static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.etx.own = 0;
		ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1));
		p++;
	}
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}

static int enh_desc_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}

static void enh_desc_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}

static void enh_desc_release_tx_desc(struct dma_desc *p)
{
	int ter = p->des01.etx.end_ring;

	memset(p, 0, offsetof(struct dma_desc, des2));
	enh_desc_end_tx_desc(p, ter);
}

static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     int csum_flag)
{
	p->des01.etx.first_segment = is_fs;

	enh_set_tx_desc_len(p, len);

	if (likely(csum_flag))
		p->des01.etx.checksum_insertion = cic_full;
}

static void enh_desc_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}

static void enh_desc_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}

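/* Worked example for the type-1 COE adjustment below: a 64-byte frame
 * received through a type-1 engine has its two appended checksum bytes
 * counted in RDES0, so frame_length reads 66 and the driver must report
 * 64 to the upper layers.
 */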
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	/* The type-1 checksum offload engines append the checksum at
	 * the end of the frame and the two bytes of checksum are added
	 * to the length.
	 * Adjust the framelen for that in the type-1 checksum offload
	 * case.
	 */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		return p->des01.erx.frame_length - 2;
	else
		return p->des01.erx.frame_length;
}

const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.get_rx_owner = enh_desc_get_rx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.clear_tx_ic = enh_desc_clear_tx_ic,
	.close_tx_desc = enh_desc_close_tx_desc,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
};
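
/* Illustrative sketch only, not part of this file (the priv-> field names
 * below are assumptions): the core driver reaches these static handlers
 * through the stmmac_desc_ops table rather than calling them directly,
 * along the lines of:
 *
 *	struct dma_desc *p = priv->dma_tx + entry;
 *
 *	if (priv->hw->desc->get_tx_owner(p))
 *		break;		(descriptor still owned by the DMA engine)
 *
 *	status = priv->hw->desc->tx_status(&priv->dev->stats,
 *					   &priv->xstats, p, priv->ioaddr);
 */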