/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.etx.error_summary)) {
		if (unlikely(p->des01.etx.jabber_timeout))
			x->tx_jabber++;

		if (unlikely(p->des01.etx.frame_flushed)) {
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(p->des01.etx.loss_carrier)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.no_carrier)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.late_collision))
			stats->collisions += p->des01.etx.collision_count;

		if (unlikely(p->des01.etx.excessive_collisions))
			stats->collisions += p->des01.etx.collision_count;

		if (unlikely(p->des01.etx.excessive_deferral))
			x->tx_deferred++;

		if (unlikely(p->des01.etx.underflow_error)) {
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(p->des01.etx.ip_header_error))
			x->tx_ip_header_error++;

		if (unlikely(p->des01.etx.payload_error)) {
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = -1;
	}

	if (unlikely(p->des01.etx.deferred))
		x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.etx.vlan_frame)
		x->tx_vlan++;
#endif

	return ret;
}

static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return p->des01.etx.buffer1_size;
}

static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 no checksum errors
	 *      1 0 1 | IPv4/6 checksum payload error
	 *      1 1 0 | IPv4/6 checksum IP header error
	 *      1 1 1 | IPv4/6 IP payload and header errors
	 *      0 0 1 | IPv4/6 unsupported IP payload
	 *      0 1 1 | COE bypassed: not an IPv4/6 frame
	 *      0 1 0 | Reserved
	 */
	if (status == 0x0)
		ret = llc_snap;
	else if (status == 0x4)
		ret = good_frame;
	else if (status == 0x5)
		ret = csum_none;
	else if (status == 0x6)
		ret = csum_none;
	else if (status == 0x7)
		ret = csum_none;
	else if (status == 0x1)
		ret = discard_frame;
	else if (status == 0x3)
		ret = discard_frame;
	return ret;
}
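/* Worked example (illustrative only, not part of the driver): an IPv4 frame
 * whose payload checksum failed is reported with frame_type = 1,
 * ipc_csum_error = 0 and payload_err = 1, i.e. status = 0x5.  Per the table
 * above this maps to csum_none: the frame is still delivered, but the stack
 * is asked to verify the checksum in software.
 */
#if 0
static int example_decode_payload_csum_error(void)
{
	/* status = (1 << 2 | 0 << 1 | 1) & 0x7 = 0x5 -> csum_none */
	return enh_desc_coe_rdes0(0 /* ipc_err */, 1 /* type */,
				  1 /* payload_err */);
}
#endif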
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_extended_desc *p)
{
	if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
		if (p->des4.erx.ip_hdr_err)
			x->ip_hdr_err++;
		if (p->des4.erx.ip_payload_err)
			x->ip_payload_err++;
		if (p->des4.erx.ip_csum_bypassed)
			x->ip_csum_bypassed++;
		if (p->des4.erx.ipv4_pkt_rcvd)
			x->ipv4_pkt_rcvd++;
		if (p->des4.erx.ipv6_pkt_rcvd)
			x->ipv6_pkt_rcvd++;
		if (p->des4.erx.msg_type == RDES_EXT_SYNC)
			x->rx_msg_type_sync++;
		else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
			x->rx_msg_type_follow_up++;
		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
			x->rx_msg_type_delay_req++;
		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
			x->rx_msg_type_delay_resp++;
		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
			x->rx_msg_type_pdelay_req++;
		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
			x->rx_msg_type_pdelay_resp++;
		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
			x->rx_msg_type_pdelay_follow_up++;
		else
			x->rx_msg_type_ext_no_ptp++;
		if (p->des4.erx.ptp_frame_type)
			x->ptp_frame_type++;
		if (p->des4.erx.ptp_ver)
			x->ptp_ver++;
		if (p->des4.erx.timestamp_dropped)
			x->timestamp_dropped++;
		if (p->des4.erx.av_pkt_rcvd)
			x->av_pkt_rcvd++;
		if (p->des4.erx.av_tagged_pkt_rcvd)
			x->av_tagged_pkt_rcvd++;
		if (p->des4.erx.vlan_tag_priority_val)
			x->vlan_tag_priority_val++;
		if (p->des4.erx.l3_filter_match)
			x->l3_filter_match++;
		if (p->des4.erx.l4_filter_match)
			x->l4_filter_match++;
		if (p->des4.erx.l3_l4_filter_no_match)
			x->l3_l4_filter_no_match++;
	}
}
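/* Illustrative sketch (not part of the driver): when advanced timestamping
 * is enabled, the rx_mac_addr bit of RDES0 checked above doubles as the
 * "extended status available" flag, so des4 is only meaningful once that
 * bit is set.  The helper name and the pr_debug() reporting here are
 * hypothetical and only show the calling convention.
 */
#if 0
static void example_count_ptp_sync(struct dma_extended_desc *p,
				   struct stmmac_extra_stats *x)
{
	enh_desc_get_ext_status(NULL, x, p);	/* updates the PTP counters */

	if (p->basic.des01.erx.rx_mac_addr &&
	    p->des4.erx.msg_type == RDES_EXT_SYNC)
		pr_debug("PTP SYNC frames so far: %lu\n",
			 (unsigned long)x->rx_msg_type_sync);
}
#endif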
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		if (unlikely(p->des01.erx.descriptor_error)) {
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error))
			x->rx_gmac_overflow++;

		if (unlikely(p->des01.erx.ipc_csum_error))
			pr_err("\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision))
			stats->collisions++;

		if (unlikely(p->des01.erx.receive_watchdog))
			x->rx_watchdog++;

		if (unlikely(p->des01.erx.error_gmii))
			x->rx_mii++;

		if (unlikely(p->des01.erx.crc_error)) {
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This does not match the information reported in the databook.
	 * At any rate, we need to understand if the hardware checksum
	 * computation is ok and report this info to the upper layers.
	 */
	ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
				 p->des01.erx.frame_type,
				 p->des01.erx.rx_mac_addr);

	if (unlikely(p->des01.erx.dribbling))
		x->dribbling_bit++;

	if (unlikely(p->des01.erx.sa_filter_fail)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag)
		x->rx_vlan++;
#endif

	return ret;
}

static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end)
{
	p->des01.all_flags = 0;
	p->des01.erx.own = 1;
	p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;

	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_rx_set_on_chain(p, end);
	else
		ehn_desc_rx_set_on_ring(p, end);

	if (disable_rx_ic)
		p->des01.erx.disable_ic = 1;
}

static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des01.all_flags = 0;
	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_tx_set_on_chain(p, end);
	else
		ehn_desc_tx_set_on_ring(p, end);
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}

static int enh_desc_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}

static void enh_desc_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}

static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
	int ter = p->des01.etx.end_ring;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p, ter);
	else
		enh_desc_end_tx_desc_on_ring(p, ter);
}

static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     int csum_flag, int mode)
{
	p->des01.etx.first_segment = is_fs;

	if (mode == STMMAC_CHAIN_MODE)
		enh_set_tx_desc_len_on_chain(p, len);
	else
		enh_set_tx_desc_len_on_ring(p, len);

	if (likely(csum_flag))
		p->des01.etx.checksum_insertion = cic_full;
}

static void enh_desc_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}

static void enh_desc_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}
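/* Illustrative sketch (not part of the driver): the usual calling order for
 * the TX helpers above when a frame spans two descriptors.  The DMA engine
 * may already be scanning the ring, so ownership of the first descriptor is
 * granted last; the function name and ring handling here are hypothetical.
 */
#if 0
static void example_xmit_two_segments(struct dma_desc *first,
				      struct dma_desc *second,
				      int len1, int len2, int csum)
{
	enh_desc_prepare_tx_desc(first, 1, len1, csum, STMMAC_RING_MODE);
	enh_desc_prepare_tx_desc(second, 0, len2, csum, STMMAC_RING_MODE);

	/* Mark the last segment and request a completion interrupt. */
	enh_desc_close_tx_desc(second);

	/* Hand the segments to the DMA engine; the first one goes last so
	 * the engine never sees a half-built frame.
	 */
	enh_desc_set_tx_owner(second);
	enh_desc_set_tx_owner(first);
}
#endif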
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	/* The type-1 checksum offload engines append the checksum at
	 * the end of the frame, and those two extra bytes are counted
	 * in the reported length.  Remove them from the frame length
	 * for type-1 engines.
	 */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		return p->des01.erx.frame_length - 2;
	else
		return p->des01.erx.frame_length;
}

static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des01.etx.time_stamp_enable = 1;
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
	return p->des01.etx.time_stamp_status;
}

static u64 enh_desc_get_timestamp(void *desc, u32 ats)
{
	u64 ns;

	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		ns = p->des6;
		/* convert the high (seconds) word to nanoseconds */
		ns += p->des7 * 1000000000ULL;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		ns = p->des2;
		ns += p->des3 * 1000000000ULL;
	}

	return ns;
}

static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
{
	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		return p->basic.des01.erx.ipc_csum_error;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
			/* timestamp is corrupted, hence don't store it */
			return 0;
		else
			return 1;
	}
}

const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.get_rx_owner = enh_desc_get_rx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.clear_tx_ic = enh_desc_clear_tx_ic,
	.close_tx_desc = enh_desc_close_tx_desc,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
	.rx_extended_status = enh_desc_get_ext_status,
	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
	.get_timestamp = enh_desc_get_timestamp,
	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
};
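/* Worked example (illustrative only, not part of the driver): with
 * des7 (seconds) = 2 and des6 (sub-second nanoseconds) = 500000000,
 * enh_desc_get_timestamp() returns 2 * 1000000000 + 500000000 =
 * 2500000000 ns, i.e. 2.5 s since the PTP hardware clock's epoch.
 * The helper below is hypothetical and only shows the status-then-read
 * ordering for the advanced-timestamping (ats) case.
 */
#if 0
static u64 example_read_rx_timestamp(struct dma_extended_desc *p)
{
	/* Only read des6/des7 once the status helper reports that a
	 * timestamp was actually captured for this descriptor.
	 */
	if (!enh_desc_get_rx_timestamp_status(p, 1))
		return 0;

	return enh_desc_get_timestamp(p, 1);
}
#endif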