/*
 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_entity.h"

/* Debug entities collected for a memory (CXGB4_ETH_DUMP_MEM) dump */
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
	{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
	{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
};

/* Debug entities collected for a hardware (CXGB4_ETH_DUMP_HW) dump */
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
	{ CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
	{ CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
	{ CUDBG_REG_DUMP, cudbg_collect_reg_dump },
	{ CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
	{ CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
	{ CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
	{ CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
	{ CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
	{ CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
	{ CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
	{ CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
	{ CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
	{ CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
	{ CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
	{ CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
	{ CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
	{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
	{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
	{ CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
	{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
	{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
	{ CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
	{ CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
	{ CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
};

/* Size in bytes needed to collect the given debug entity; returns 0 if
 * the entity is not supported on this adapter.
 */
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
	case CUDBG_CIM_OBQ_ULP1:
	case CUDBG_CIM_OBQ_ULP2:
	case CUDBG_CIM_OBQ_ULP3:
	case CUDBG_CIM_OBQ_SGE:
	case CUDBG_CIM_OBQ_NCSI:
	case CUDBG_CIM_OBQ_RXQ0:
	case CUDBG_CIM_OBQ_RXQ1:
		len = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2;
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	default:
		break;
	}

	return len;
}

u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
	u32 i, entity;
	u32 len = 0;

	if (flag & CXGB4_ETH_DUMP_HW) {
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
			entity = cxgb4_collect_hw_dump[i].entity;
			len += cxgb4_get_entity_length(adap, entity);
		}
	}

	if (flag & CXGB4_ETH_DUMP_MEM) {
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) {
			entity = cxgb4_collect_mem_dump[i].entity;
			len += cxgb4_get_entity_length(adap, entity);
		}
	}

	return len;
}

static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
				       struct cudbg_buffer *dbg_buff,
				       const struct cxgb4_collect_entity *e_arr,
				       u32 arr_size, void *buf, u32 *tot_size)
{
	struct adapter *adap = pdbg_init->adap;
	struct cudbg_error cudbg_err = { 0 };
	struct cudbg_entity_hdr *entity_hdr;
	u32 entity_size, i;
	u32 total_size = 0;
	int ret;

	for (i = 0; i < arr_size; i++) {
		const struct cxgb4_collect_entity *e = &e_arr[i];

		/* Skip entities that won't fit in output buffer */
		entity_size = cxgb4_get_entity_length(adap, e->entity);
		if (entity_size >
		    pdbg_init->outbuf_size - *tot_size - total_size)
			continue;

		entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
		entity_hdr->entity_type = e->entity;
		entity_hdr->start_offset = dbg_buff->offset;
		memset(&cudbg_err, 0, sizeof(struct cudbg_error));
		ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
		if (ret) {
			entity_hdr->size = 0;
			dbg_buff->offset = entity_hdr->start_offset;
		} else {
			cudbg_align_debug_buffer(dbg_buff, entity_hdr);
		}

		/* Log error and continue with next entity */
		if (cudbg_err.sys_err)
			ret = CUDBG_SYSTEM_ERROR;

		entity_hdr->hdr_flags = ret;
		entity_hdr->sys_err = cudbg_err.sys_err;
		entity_hdr->sys_warn = cudbg_err.sys_warn;
		total_size += entity_hdr->size;
	}

	*tot_size += total_size;
}

/* Collect the debug entities selected by "flag" into "buf". On success,
 * *buf_size is updated with the number of bytes actually used.
 */
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
			u32 flag)
{
	struct cudbg_init cudbg_init = { 0 };
	struct cudbg_buffer dbg_buff = { 0 };
	u32 size, min_size, total_size = 0;
	struct cudbg_hdr *cudbg_hdr;

	size = *buf_size;

	cudbg_init.adap = adap;
	cudbg_init.outbuf = buf;
	cudbg_init.outbuf_size = size;

	dbg_buff.data = buf;
	dbg_buff.size = size;
	dbg_buff.offset = 0;

	cudbg_hdr = (struct cudbg_hdr *)buf;
	cudbg_hdr->signature = CUDBG_SIGNATURE;
	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
	cudbg_hdr->chip_ver = adap->params.chip;
	cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
	cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE;

	min_size = sizeof(struct cudbg_hdr) +
		   sizeof(struct cudbg_entity_hdr) *
		   cudbg_hdr->max_entities;
	if (size < min_size)
		return -ENOMEM;

	dbg_buff.offset += min_size;
	total_size = dbg_buff.offset;

	if (flag & CXGB4_ETH_DUMP_HW)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_hw_dump,
					   ARRAY_SIZE(cxgb4_collect_hw_dump),
					   buf,
					   &total_size);

	if (flag & CXGB4_ETH_DUMP_MEM)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_mem_dump,
					   ARRAY_SIZE(cxgb4_collect_mem_dump),
					   buf,
					   &total_size);

	cudbg_hdr->data_len = total_size;
	*buf_size = total_size;
	return 0;
}

/* Initialize the ethtool dump state advertised for this adapter */
void cxgb4_init_ethtool_dump(struct adapter *adapter)
{
	adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
	adapter->eth_dump.version = adapter->params.fw_vers;
	adapter->eth_dump.len = 0;
}
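
/*
 * Usage sketch (illustrative only, not code from this file): an ethtool
 * dump handler is expected to drive this API by first sizing a buffer
 * with cxgb4_get_dump_length() and then filling it with
 * cxgb4_cudbg_collect(). The vzalloc()'d scratch buffer and the "data"
 * destination below are assumptions made for the example.
 *
 *	u32 len = cxgb4_get_dump_length(adap, adap->eth_dump.flag);
 *	void *buf = vzalloc(len);
 *
 *	if (buf) {
 *		if (!cxgb4_cudbg_collect(adap, buf, &len,
 *					 adap->eth_dump.flag))
 *			memcpy(data, buf, len);	(len now holds bytes used)
 *		vfree(buf);
 *	}
 */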