1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, v.1, (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2014-2017 Cavium, Inc. 24 * The contents of this file are subject to the terms of the Common Development 25 * and Distribution License, v.1, (the "License"). 26 27 * You may not use this file except in compliance with the License. 28 29 * You can obtain a copy of the License at available 30 * at http://opensource.org/licenses/CDDL-1.0 31 32 * See the License for the specific language governing permissions and 33 * limitations under the License. 34 */ 35 36 37 #include "qede.h" 38 39 /* 40 * Compliments of Larry W. and qlc team. 41 */ 42 void 43 qede_stacktrace(qede_t *qede) 44 { 45 int depth, i; 46 pc_t pcstack[16]; 47 char *sym; 48 ulong_t off; 49 50 depth = getpcstack(&pcstack[0], 16); 51 52 cmn_err(CE_CONT, "qede(%d): ---------- \n", qede->instance); 53 for (i = 0; i < OSAL_MIN_T(int, depth, 16); i++) { 54 sym = kobj_getsymname((uintptr_t)pcstack[i], &off); 55 56 if (sym == NULL) { 57 cmn_err(CE_CONT, "qede(%d): sym is NULL\n", 58 qede->instance); 59 } else { 60 cmn_err(CE_CONT, "%s(%d): %s+%lx\n", __func__, 61 qede->instance, sym ? sym : "?", off); 62 } 63 } 64 cmn_err(CE_CONT, "qede(%d): ---------- \n", qede->instance); 65 } 66 67 void 68 qede_dbg_ipv6_ext_hdr(qede_tx_pktinfo_t *pktinfo, mblk_t *mp) 69 { 70 struct ether_header *eth_hdr = 71 (struct ether_header *)(void *)mp->b_rptr; 72 ipha_t *ip_hdr; 73 struct ip6_hdr *ipv6hdr = NULL; 74 75 /* mac header type and len */ 76 if (ntohs(eth_hdr->ether_type) == ETHERTYPE_IP) { 77 pktinfo->ether_type = ntohs(eth_hdr->ether_type); 78 pktinfo->mac_hlen = sizeof (struct ether_header); 79 } else if (ntohs(eth_hdr->ether_type) == ETHERTYPE_VLAN) { 80 struct ether_vlan_header *vlan_hdr = 81 (struct ether_vlan_header *)(void *)mp->b_rptr; 82 pktinfo->ether_type = ntohs(vlan_hdr->ether_type); 83 pktinfo->mac_hlen = sizeof (struct ether_vlan_header); 84 } 85 86 ip_hdr = (ipha_t *)(void *)((u8 *)mp->b_rptr + pktinfo->mac_hlen); 87 88 if (IPH_HDR_VERSION(ip_hdr) == IPV6_VERSION) { 89 ipv6hdr = (struct ip6_hdr *)(void *)ip_hdr; 90 91 if (ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt == IPPROTO_IPV6) { 92 cmn_err(CE_NOTE, "%s: ipv6 extenstion header found !", 93 __func__); 94 } 95 } 96 } 97 98 char * 99 qede_get_L4_type(uint16_t parse_flags) 100 { 101 parse_flags = (parse_flags >> PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) 102 & PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK; 103 if (parse_flags == 1) { 104 return ("TCP"); 105 } else if (parse_flags == 2) { 106 return ("UDP"); 107 } else { 108 return ("UNKNOWN"); 109 } 110 } 111 112 char * 113 qede_get_L3_type(uint16_t parse_flags) 114 { 115 parse_flags = (parse_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) 116 & PARSING_AND_ERR_FLAGS_L3TYPE_MASK; 117 if (parse_flags == 1) { 118 return ("IPv4"); 119 } else if (parse_flags == 2) { 120 return ("IPv6"); 121 } else { 122 return ("UNKNOWN"); 123 } 124 } 125 126 127 void 128 qede_print_vport_params(qede_t *qede, 129 struct ecore_sp_vport_update_params *vport_params) 130 { 131 struct ecore_filter_accept_flags *accept_flags; 132 133 accept_flags = &vport_params->accept_flags; 134 135 cmn_err(CE_WARN, "opaque_fid = %d", 136 vport_params->opaque_fid); 137 cmn_err(CE_WARN, "vport_id = %d", 138 vport_params->vport_id); 139 cmn_err(CE_WARN, "update_vport_active_rx_flg = %d", 140 vport_params->update_vport_active_rx_flg); 141 cmn_err(CE_WARN, "vport_active_rx_flg = %d", 142 vport_params->vport_active_rx_flg); 143 cmn_err(CE_WARN, "update_vport_active_tx_flg = %d", 144 vport_params->update_vport_active_tx_flg); 145 cmn_err(CE_WARN, "vport_active_tx_flg = %d", 146 vport_params->vport_active_tx_flg); 147 cmn_err(CE_WARN, "update_inner_vlan_removal_flg = %d", 148 vport_params->update_inner_vlan_removal_flg); 149 cmn_err(CE_WARN, "inner_vlan_removal_flg = %d", 150 vport_params->inner_vlan_removal_flg); 151 cmn_err(CE_WARN, "update_default_vlan_enable_flg = %d", 152 vport_params->update_default_vlan_enable_flg); 153 cmn_err(CE_WARN, "default_vlan_enable_flg = %d", 154 vport_params->default_vlan_enable_flg); 155 cmn_err(CE_WARN, "update_default_vlan_flg = %d", 156 vport_params->update_default_vlan_flg); 157 cmn_err(CE_WARN, "default_vlan = %d", 158 vport_params->default_vlan); 159 cmn_err(CE_WARN, "update_tx_switching_flg = %d", 160 vport_params->update_tx_switching_flg); 161 cmn_err(CE_WARN, "tx_switching_flg = %d", 162 vport_params->tx_switching_flg); 163 cmn_err(CE_WARN, "update_approx_mcast_flg = %d", 164 vport_params->update_approx_mcast_flg); 165 cmn_err(CE_WARN, "update_anti_spoofing_en_flg = %d", 166 vport_params->update_anti_spoofing_en_flg); 167 cmn_err(CE_WARN, "anti_spoofing_en = %d", 168 vport_params->anti_spoofing_en); 169 cmn_err(CE_WARN, "update_accept_any_vlan_flg = %d", 170 vport_params->update_accept_any_vlan_flg); 171 cmn_err(CE_WARN, "accept_any_vlan = %d", 172 vport_params->accept_any_vlan); 173 174 cmn_err(CE_WARN, "update_rx_mode_config; = %d", 175 accept_flags->update_rx_mode_config); 176 cmn_err(CE_WARN, "update_tx_mode_config; = %d", 177 accept_flags->update_tx_mode_config); 178 } 179 180 void 181 qede_dump_bytes(char *buf, int len) 182 { 183 int i; 184 for (i = 0; i < len; i += 8, buf+=8) { 185 cmn_err(CE_NOTE, 186 "!%.02x %.02x %.02x %.02x %.02x %.02x %.02x %.02x", 187 buf[i + 0] & 0xff, buf[i + 1] & 0xff, 188 buf[i + 2] & 0xff, buf[i + 3] & 0xff, 189 buf[i + 4] & 0xff, buf[i + 5] & 0xff, 190 buf[i + 6] & 0xff, buf[i + 7] & 0xff); 191 } 192 } 193 194 void 195 qede_dump_single_mblk(qede_t *qede, mblk_t *mp) 196 { 197 int len = MBLKL(mp); 198 u8 *buf = mp->b_rptr; 199 int i; 200 201 for (i = 0; i < len; i += 8) { 202 cmn_err(CE_NOTE, "!%p: %2x %2x %2x %2x %2x %2x %2x %2x", 203 buf, buf[i], buf[i + 1], 204 buf[i + 2], buf[i + 3], 205 buf[i + 4], buf[i + 5], 206 buf[i + 6], buf[i + 7]); 207 } 208 } 209 210 void 211 qede_dump_mblk_chain_bcont_ptr(qede_t *qede, mblk_t *mp) 212 { 213 mblk_t *bp; 214 int len, num_mblk = 0; 215 int total_len = 0; 216 217 for (bp = mp; bp != NULL; bp = bp->b_cont) { 218 len = MBLKL(bp); 219 total_len += len; 220 num_mblk++; 221 222 qede_info(qede, "b_cont bp len %d", len); 223 qede_dump_single_mblk(qede, bp); 224 } 225 226 qede_info(qede, "Total b_cont mblks %d, total_len %d", 227 num_mblk, total_len); 228 } 229 /* 230 * Loop through all data elements in mp 231 * and print them 232 */ 233 void 234 qede_dump_mblk_chain_bnext_ptr(qede_t *qede, mblk_t *mp) 235 { 236 mblk_t *bp; 237 int len, num_mblk = 0; 238 int total_len = 0; 239 240 for (bp = mp; bp != NULL; bp = bp->b_next) { 241 len = MBLKL(bp); 242 total_len += len; 243 num_mblk++; 244 245 qede_info(qede, "b_next bp len %d", len); 246 } 247 248 qede_info(qede, "Total b_next mblks %d, total_len %d", 249 num_mblk, total_len); 250 } 251 252 void 253 qede_print_intr_ctx(qede_intr_context_t *intr_ctx) 254 { 255 } 256 257 void 258 qede_print_tx_ring(qede_tx_ring_t *tx_ring) 259 { 260 } 261 262 void 263 qede_print_rx_ring(qede_rx_ring_t *rx_ring) 264 { 265 } 266 267 void 268 qede_print_fastpath(qede_fastpath_t *fp) 269 { 270 } 271 272 void 273 qede_print_qede(qede_t *qede) 274 { 275 } 276 277 /* 278 * This function is called from ecore in the init path 279 * just before starting the function 280 */ 281 void 282 qede_debug_before_pf_start(struct ecore_dev *edev, u8 id) 283 { 284 } 285 286 void 287 qede_debug_after_pf_stop(void *cdev, u8 my_id) 288 { 289 } 290 291 292 void 293 qede_dump_reg_cqe(struct eth_fast_path_rx_reg_cqe *cqe) 294 { 295 cmn_err(CE_WARN, "qede_dump_reg_cqe"); 296 cmn_err(CE_WARN, " pkt_len = %d", LE_16(cqe->pkt_len)); 297 cmn_err(CE_WARN, " bd_num = %d", cqe->bd_num); 298 cmn_err(CE_WARN, " len_on_first_bd = %d", 299 LE_16(cqe->len_on_first_bd)); 300 cmn_err(CE_WARN, " placement_offset = %d", cqe->placement_offset); 301 cmn_err(CE_WARN, " vlan_tag = %d", LE_16(cqe->vlan_tag)); 302 cmn_err(CE_WARN, " rss_hash = %d", LE_32(cqe->rss_hash)); 303 cmn_err(CE_WARN, " pars_flags = %x", 304 LE_16((uint16_t)cqe->pars_flags.flags)); 305 cmn_err(CE_WARN, " tunnel_pars_flags = %x", 306 cqe->tunnel_pars_flags.flags); 307 cmn_err(CE_WARN, " bitfields = %x", cqe->bitfields); 308 } 309 310 void 311 qede_dump_start_lro_cqe(struct eth_fast_path_rx_tpa_start_cqe *cqe) 312 { 313 int i; 314 cmn_err(CE_WARN, "qede_dump_start_lro_cqe"); 315 cmn_err(CE_WARN, " tpa_agg_index = %d", cqe->tpa_agg_index); 316 cmn_err(CE_WARN, " seg_len = %d", LE_16(cqe->seg_len)); 317 cmn_err(CE_WARN, " vlan_tag = %d", LE_16(cqe->vlan_tag)); 318 cmn_err(CE_WARN, " rss_hash = %d", LE_32(cqe->rss_hash)); 319 cmn_err(CE_WARN, " len_on_first_bd = %d", 320 LE_16(cqe->len_on_first_bd)); 321 cmn_err(CE_WARN, " placement_offset = %d", cqe->placement_offset); 322 cmn_err(CE_WARN, " header_len = %d", cqe->header_len); 323 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) 324 cmn_err(CE_WARN, " ext_bd_len_list[%d] = %d", i, 325 LE_16(cqe->ext_bd_len_list[i])); 326 cmn_err(CE_WARN, " pars_flags = 0x%x", 327 LE_16((uint16_t)cqe->pars_flags.flags)); 328 cmn_err(CE_WARN, " tunnel_pars_flags = 0x%x", 329 cqe->tunnel_pars_flags.flags); 330 cmn_err(CE_WARN, " bitfields = 0x%x", cqe->bitfields ); 331 } 332 333 void 334 qede_dump_cont_lro_cqe(struct eth_fast_path_rx_tpa_cont_cqe *cqe) 335 { 336 int i; 337 cmn_err(CE_WARN, "qede_dump_cont_lro_cqe"); 338 cmn_err(CE_WARN, " tpa_agg_index = %d", cqe->tpa_agg_index); 339 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 340 cmn_err(CE_WARN, " len_list[%d] = %d", i, 341 LE_16(cqe->len_list[i])); 342 } 343 } 344 345 void 346 qede_dump_end_lro_cqe(struct eth_fast_path_rx_tpa_end_cqe *cqe) 347 { 348 int i; 349 cmn_err(CE_WARN, "qede_dump_end_lro_cqe"); 350 cmn_err(CE_WARN, " tpa_agg_index = %d", cqe->tpa_agg_index ); 351 cmn_err(CE_WARN, " total_packet_len = %d", 352 LE_16(cqe->total_packet_len)); 353 cmn_err(CE_WARN, " num_of_bds = %d", cqe->num_of_bds); 354 cmn_err(CE_WARN, " num_of_coalesced_segs = %d", 355 LE_16(cqe->num_of_coalesced_segs)); 356 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 357 cmn_err(CE_WARN, " len_list[%d] = %d", i, 358 LE_16(cqe->len_list[i])); 359 } 360 cmn_err(CE_WARN, " ts_delta = %d", LE_32(cqe->ts_delta)); 361 } 362