// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifdef CONFIG_DEBUG_FS

#include <linux/fs.h>
#include <linux/debugfs.h>

#include "i40e.h"

static struct dentry *i40e_dbg_root;

/**
 * i40e_dbg_find_vsi - searches for the vsi with the given seid
 * @pf: the PF structure to search for the vsi
 * @seid: seid of the vsi it is searching for
 **/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
	int i;

	if (seid < 0)
		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
	else
		for (i = 0; i < pf->num_alloc_vsi; i++)
			if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
				return pf->vsi[i];

	return NULL;
}

/**
 * i40e_dbg_find_veb - searches for the veb with the given seid
 * @pf: the PF structure to search for the veb
 * @seid: seid of the veb it is searching for
 **/
static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
{
	int i;

	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && pf->veb[i]->seid == seid)
			return pf->veb[i];
	return NULL;
}

/**************************************************************
 * command
 * The command entry in debugfs is for giving the driver commands
 * to be executed - these may be for changing the internal switch
 * setup, adding or removing filters, or other things.  Many of
 * these will be useful for some forms of unit testing.
 **************************************************************/
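
/*
 * Illustrative usage sketch (not part of the driver): the "command" file
 * handled below ends up in the per-PF debugfs directory created in
 * i40e_dbg_pf_init().  Assuming debugfs is mounted at /sys/kernel/debug
 * and with <pci-id> standing in for the PF's PCI device name, a command
 * can be issued from a shell roughly like:
 *
 *   echo "dump vsi" > /sys/kernel/debug/i40e/<pci-id>/command
 *   dmesg | tail
 *
 * All output goes to the kernel log via dev_info(); reading the file back
 * only returns the LAN netdev name and the static i40e_dbg_command_buf.
 */
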
static char i40e_dbg_command_buf[256] = "";

/**
 * i40e_dbg_command_read - read for command datum
 * @filp: the opened file
 * @buffer: where to write the data for the user to read
 * @count: the size of the user's buffer
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
				     size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	int buf_size = 256;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;
	if (count < buf_size)
		return -ENOSPC;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	len = snprintf(buf, buf_size, "%s: %s\n",
		       pf->vsi[pf->lan_vsi]->netdev->name,
		       i40e_dbg_command_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);

	if (bytes_not_copied)
		return -EFAULT;

	*ppos = len;
	return len;
}

static char *i40e_filter_state_string[] = {
	"INVALID",
	"NEW",
	"ACTIVE",
	"FAILED",
	"REMOVE",
};

/**
 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
 * @pf: the i40e_pf created in command write
 * @seid: the seid the user put in
 **/
static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
{
	struct rtnl_link_stats64 *nstat;
	struct i40e_mac_filter *f;
	struct i40e_vsi *vsi;
	int i, bkt;

	vsi = i40e_dbg_find_vsi(pf, seid);
	if (!vsi) {
		dev_info(&pf->pdev->dev,
			 "dump %d: seid not found\n", seid);
		return;
	}
	dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
	if (vsi->netdev) {
		struct net_device *nd = vsi->netdev;

		dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
			 nd->name, nd->state, nd->flags);
		dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
			 (unsigned long int)nd->features);
		dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
			 (unsigned long int)nd->hw_features);
		dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
			 (unsigned long int)nd->vlan_features);
	}
	dev_info(&pf->pdev->dev,
		 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
		 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
	for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
		dev_info(&pf->pdev->dev,
			 " state[%d] = %08lx\n",
			 i, vsi->state[i]);
	if (vsi == pf->vsi[pf->lan_vsi])
		dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
			 pf->hw.mac.addr,
			 pf->hw.mac.san_addr,
			 pf->hw.mac.port_addr);
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		dev_info(&pf->pdev->dev,
			 " mac_filter_hash: %pM vid=%d, state %s\n",
			 f->macaddr, f->vlan,
			 i40e_filter_state_string[f->state]);
	}
	dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
		 vsi->active_filters, vsi->promisc_threshold,
		 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
		  "ON" : "OFF"));
	nstat = i40e_get_vsi_stats_struct(vsi);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
		 (unsigned long int)nstat->rx_packets,
		 (unsigned long int)nstat->rx_bytes,
		 (unsigned long int)nstat->rx_errors,
		 (unsigned long int)nstat->rx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
		 (unsigned long int)nstat->tx_packets,
		 (unsigned long int)nstat->tx_bytes,
		 (unsigned long int)nstat->tx_errors,
		 (unsigned long int)nstat->tx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats: multicast = %lu, collisions = %lu\n",
		 (unsigned long int)nstat->multicast,
		 (unsigned long int)nstat->collisions);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
		 (unsigned long int)nstat->rx_length_errors,
		 (unsigned long int)nstat->rx_over_errors,
		 (unsigned long int)nstat->rx_crc_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
		 (unsigned long int)nstat->rx_frame_errors,
		 (unsigned long int)nstat->rx_fifo_errors,
		 (unsigned long int)nstat->rx_missed_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
		 (unsigned long int)nstat->tx_aborted_errors,
		 (unsigned long int)nstat->tx_carrier_errors,
		 (unsigned long int)nstat->tx_fifo_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
		 (unsigned long int)nstat->tx_heartbeat_errors,
		 (unsigned long int)nstat->tx_window_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
		 (unsigned long int)nstat->rx_compressed,
		 (unsigned long int)nstat->tx_compressed);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_packets,
		 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
		 (unsigned long int)vsi->net_stats_offsets.rx_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.tx_packets,
		 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
		 (unsigned long int)vsi->net_stats_offsets.tx_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.multicast,
		 (unsigned long int)vsi->net_stats_offsets.collisions);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
		 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
	dev_info(&pf->pdev->dev,
		 " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
		 vsi->tx_restart, vsi->tx_busy,
		 vsi->rx_buf_failed, vsi->rx_page_failed);
	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);

		if (!rx_ring)
			continue;

		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
			 i, *rx_ring->state,
			 rx_ring->queue_index,
			 rx_ring->reg_idx);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_buf_len = %d\n",
			 i, rx_ring->rx_buf_len);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
			 i,
			 rx_ring->next_to_use,
			 rx_ring->next_to_clean,
			 rx_ring->ring_active);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
			 i, rx_ring->stats.packets,
			 rx_ring->stats.bytes,
			 rx_ring->rx_stats.non_eop_descs);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
			 i,
			 rx_ring->rx_stats.alloc_page_failed,
			 rx_ring->rx_stats.alloc_buff_failed);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n",
			 i,
			 rx_ring->rx_stats.realloc_count,
			 rx_ring->rx_stats.page_reuse_count);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: size = %i\n",
			 i, rx_ring->size);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: itr_setting = %d (%s)\n",
			 i, rx_ring->itr_setting,
			 ITR_IS_DYNAMIC(rx_ring->itr_setting) ?
			 "dynamic" : "fixed");
	}
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);

		if (!tx_ring)
			continue;

		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
			 i, *tx_ring->state,
			 tx_ring->queue_index,
			 tx_ring->reg_idx);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
			 i,
			 tx_ring->next_to_use,
			 tx_ring->next_to_clean,
			 tx_ring->ring_active);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
			 i, tx_ring->stats.packets,
			 tx_ring->stats.bytes,
			 tx_ring->tx_stats.restart_queue);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
			 i,
			 tx_ring->tx_stats.tx_busy,
			 tx_ring->tx_stats.tx_done_old);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: size = %i\n",
			 i, tx_ring->size);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: DCB tc = %d\n",
			 i, tx_ring->dcb_tc);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: itr_setting = %d (%s)\n",
			 i, tx_ring->itr_setting,
			 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
	}
	rcu_read_unlock();
	dev_info(&pf->pdev->dev,
		 " work_limit = %d\n",
		 vsi->work_limit);
	dev_info(&pf->pdev->dev,
		 " max_frame = %d, rx_buf_len = %d dtype = %d\n",
		 vsi->max_frame, vsi->rx_buf_len, 0);
	dev_info(&pf->pdev->dev,
		 " num_q_vectors = %i, base_vector = %i\n",
		 vsi->num_q_vectors, vsi->base_vector);
	dev_info(&pf->pdev->dev,
		 " seid = %d, id = %d, uplink_seid = %d\n",
		 vsi->seid, vsi->id, vsi->uplink_seid);
	dev_info(&pf->pdev->dev,
		 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
		 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
		 vsi->num_rx_desc);
	dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
	if (vsi->type == I40E_VSI_SRIOV)
		dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
	dev_info(&pf->pdev->dev,
		 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
		 vsi->info.valid_sections, vsi->info.switch_id);
	dev_info(&pf->pdev->dev,
		 " info: sw_reserved[] = 0x%02x 0x%02x\n",
		 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
	dev_info(&pf->pdev->dev,
		 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
		 vsi->info.sec_flags, vsi->info.sec_reserved);
	dev_info(&pf->pdev->dev,
		 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
		 vsi->info.pvid, vsi->info.fcoe_pvid,
		 vsi->info.port_vlan_flags);
	dev_info(&pf->pdev->dev,
		 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
		 vsi->info.pvlan_reserved[2]);
	dev_info(&pf->pdev->dev,
		 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
		 vsi->info.ingress_table, vsi->info.egress_table);
	dev_info(&pf->pdev->dev,
		 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
		 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
		 vsi->info.cas_pv_reserved);
	dev_info(&pf->pdev->dev,
		 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
		 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
		 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
		 vsi->info.queue_mapping[6],
		 vsi->info.queue_mapping[7]);
	dev_info(&pf->pdev->dev,
		 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
		 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
		 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
		 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
	dev_info(&pf->pdev->dev,
		 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
		 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
		 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
		 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
	dev_info(&pf->pdev->dev,
		 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.queueing_opt_flags,
		 vsi->info.queueing_opt_reserved[0],
		 vsi->info.queueing_opt_reserved[1],
		 vsi->info.queueing_opt_reserved[2]);
	dev_info(&pf->pdev->dev,
		 " info: up_enable_bits = 0x%02x\n",
		 vsi->info.up_enable_bits);
	dev_info(&pf->pdev->dev,
		 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
		 vsi->info.sched_reserved, vsi->info.outer_up_table);
	dev_info(&pf->pdev->dev,
		 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
		 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
		 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
		 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
		 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
	dev_info(&pf->pdev->dev,
		 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
		 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
		 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
		 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
	dev_info(&pf->pdev->dev,
		 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
		 vsi->info.stat_counter_idx, vsi->info.sched_id);
	dev_info(&pf->pdev->dev,
		 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
		 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
		 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
		 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
		 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
		 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
	dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
	dev_info(&pf->pdev->dev,
		 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
		 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		dev_info(&pf->pdev->dev,
			 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
			 i, vsi->tc_config.tc_info[i].qoffset,
			 vsi->tc_config.tc_info[i].qcount,
			 vsi->tc_config.tc_info[i].netdev_tc);
	}
	dev_info(&pf->pdev->dev,
		 " bw: bw_limit = %d, bw_max_quanta = %d\n",
		 vsi->bw_limit, vsi->bw_max_quanta);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		dev_info(&pf->pdev->dev,
			 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
			 i, vsi->bw_ets_share_credits[i],
			 vsi->bw_ets_limit_credits[i],
			 vsi->bw_ets_max_quanta[i]);
	}
}
/**
 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
{
	struct i40e_adminq_ring *ring;
	struct i40e_hw *hw = &pf->hw;
	char hdr[32];
	int i;

	snprintf(hdr, sizeof(hdr), "%s %s: ",
		 dev_driver_string(&pf->pdev->dev),
		 dev_name(&pf->pdev->dev));

	/* first the send (command) ring, then the receive (event) ring */
	dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
	ring = &(hw->aq.asq);
	for (i = 0; i < ring->count; i++) {
		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);

		dev_info(&pf->pdev->dev,
			 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
			 i, d->flags, d->opcode, d->datalen, d->retval,
			 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
	}

	dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
	ring = &(hw->aq.arq);
	for (i = 0; i < ring->count; i++) {
		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);

		dev_info(&pf->pdev->dev,
			 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
			 i, d->flags, d->opcode, d->datalen, d->retval,
			 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
	}
}

/**
 * i40e_dbg_dump_desc - handles dump desc write into command datum
 * @cnt: number of arguments that the user supplied
 * @vsi_seid: vsi id entered by user
 * @ring_id: ring id entered by user
 * @desc_n: descriptor number entered by user
 * @pf: the i40e_pf created in command write
 * @is_rx_ring: true if rx, false if tx
 **/
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
			       struct i40e_pf *pf, bool is_rx_ring)
{
	struct i40e_tx_desc *txd;
	union i40e_rx_desc *rxd;
	struct i40e_ring *ring;
	struct i40e_vsi *vsi;
	int i;

	vsi = i40e_dbg_find_vsi(pf, vsi_seid);
	if (!vsi) {
		dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
		return;
	}
	if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
		dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
		return;
	}
	if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
		dev_info(&pf->pdev->dev,
			 "descriptor rings have not been allocated for vsi %d\n",
			 vsi_seid);
		return;
	}

	ring = kmemdup(is_rx_ring
		       ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id],
		       sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return;

	if (cnt == 2) {
		dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
			 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
		for (i = 0; i < ring->count; i++) {
			if (!is_rx_ring) {
				txd = I40E_TX_DESC(ring, i);
				dev_info(&pf->pdev->dev,
					 " d[%03x] = 0x%016llx 0x%016llx\n",
					 i, txd->buffer_addr,
					 txd->cmd_type_offset_bsz);
			} else {
				rxd = I40E_RX_DESC(ring, i);
				dev_info(&pf->pdev->dev,
					 " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
					 i, rxd->read.pkt_addr,
					 rxd->read.hdr_addr,
					 rxd->read.rsvd1, rxd->read.rsvd2);
			}
		}
	} else if (cnt == 3) {
		if (desc_n >= ring->count || desc_n < 0) {
			dev_info(&pf->pdev->dev,
				 "descriptor %d not found\n", desc_n);
			goto out;
		}
		if (!is_rx_ring) {
			txd = I40E_TX_DESC(ring, desc_n);
			dev_info(&pf->pdev->dev,
				 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
				 vsi_seid, ring_id, desc_n,
				 txd->buffer_addr, txd->cmd_type_offset_bsz);
		} else {
			rxd = I40E_RX_DESC(ring, desc_n);
			dev_info(&pf->pdev->dev,
				 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
				 vsi_seid, ring_id, desc_n,
				 rxd->read.pkt_addr, rxd->read.hdr_addr,
				 rxd->read.rsvd1, rxd->read.rsvd2);
		}
	} else {
		dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
	}

out:
	kfree(ring);
}

/**
 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
				 i, pf->vsi[i]->seid);
}

/**
 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
 * @pf: the i40e_pf created in command write
 * @estats: the eth stats structure to be dumped
 **/
static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
				    struct i40e_eth_stats *estats)
{
	dev_info(&pf->pdev->dev, " ethstats:\n");
	dev_info(&pf->pdev->dev,
		 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
		 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
	dev_info(&pf->pdev->dev,
		 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
		 estats->rx_broadcast, estats->rx_discards);
	dev_info(&pf->pdev->dev,
		 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
		 estats->rx_unknown_protocol, estats->tx_bytes);
	dev_info(&pf->pdev->dev,
		 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
		 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
	dev_info(&pf->pdev->dev,
		 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
		 estats->tx_discards, estats->tx_errors);
}

/**
 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
 * @pf: the i40e_pf created in command write
 * @seid: the seid the user put in
 **/
static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
{
	struct i40e_veb *veb;

	veb = i40e_dbg_find_veb(pf, seid);
	if (!veb) {
		dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
		return;
	}
	dev_info(&pf->pdev->dev,
		 "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
		 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
		 veb->uplink_seid,
		 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	i40e_dbg_dump_eth_stats(pf, &veb->stats);
}

/**
 * i40e_dbg_dump_veb_all - dumps stats for all known VEBs
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
{
	struct i40e_veb *veb;
	int i;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		veb = pf->veb[i];
		if (veb)
			i40e_dbg_dump_veb_seid(pf, veb->seid);
	}
}

/**
 * i40e_dbg_dump_vf - dump VF info
 * @pf: the i40e_pf created in command write
 * @vf_id: the vf_id from the user
 **/
static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vf *vf;
	struct i40e_vsi *vsi;

	if (!pf->num_alloc_vfs) {
		dev_info(&pf->pdev->dev, "no VFs allocated\n");
	} else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
		vf = &pf->vf[vf_id];
		vsi = pf->vsi[vf->lan_vsi_idx];
		dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
			 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
		dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
			 vf->num_mdd_events,
			 vf->num_invalid_msgs,
			 vf->num_valid_msgs);
	} else {
		dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
	}
}

/**
 * i40e_dbg_dump_vf_all - dump VF info for all VFs
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
{
	int i;

	if (!pf->num_alloc_vfs)
		dev_info(&pf->pdev->dev, "no VFs enabled!\n");
	else
		for (i = 0; i < pf->num_alloc_vfs; i++)
			i40e_dbg_dump_vf(pf, i);
}

/**
 * i40e_dbg_command_write - write into command datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_command_write(struct file *filp,
				      const char __user *buffer,
				      size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	char *cmd_buf, *cmd_buf_tmp;
	int bytes_not_copied;
	struct i40e_vsi *vsi;
	int vsi_seid;
	int veb_seid;
	int vf_id;
	int cnt;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;

	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd_buf)
		return count;
	bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
	if (bytes_not_copied) {
		kfree(cmd_buf);
		return -EFAULT;
	}
	cmd_buf[count] = '\0';

	cmd_buf_tmp = strchr(cmd_buf, '\n');
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		count = cmd_buf_tmp - cmd_buf + 1;
	}

	if (strncmp(cmd_buf, "add vsi", 7) == 0) {
		vsi_seid = -1;
		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
		if (cnt == 0) {
			/* default to PF VSI */
			vsi_seid = pf->vsi[pf->lan_vsi]->seid;
		} else if (vsi_seid < 0) {
			dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
				 vsi_seid);
			goto command_write_done;
		}

		/* By default we are in VEPA mode, if this is the first VF/VMDq
		 * VSI to be added switch to VEB mode.
		 */
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}

		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
		if (vsi)
			dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
				 vsi->seid, vsi->uplink_seid);
		else
			dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);

	} else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev,
				 "del vsi: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
				 vsi_seid);
			goto command_write_done;
		}

		dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
		i40e_vsi_release(vsi);

	} else if (strncmp(cmd_buf, "add relay", 9) == 0) {
		struct i40e_veb *veb;
		int uplink_seid, i;

		cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev,
				 "add relay: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		} else if (uplink_seid < 0) {
			dev_info(&pf->pdev->dev,
				 "add relay %d: bad uplink seid\n",
				 uplink_seid);
			goto command_write_done;
		}

		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "add relay: VSI %d not found\n", vsi_seid);
			goto command_write_done;
		}

		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
				break;
		if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
		    uplink_seid != pf->mac_seid) {
			dev_info(&pf->pdev->dev,
				 "add relay: relay uplink %d not found\n",
				 uplink_seid);
			goto command_write_done;
		}

		veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
				     vsi->tc_config.enabled_tc);
		if (veb)
			dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
		else
			dev_info(&pf->pdev->dev, "add relay failed\n");

	} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
		int i;

		cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev,
				 "del relay: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		} else if (veb_seid < 0) {
			dev_info(&pf->pdev->dev,
				 "del relay %d: bad relay seid\n", veb_seid);
			goto command_write_done;
		}

		/* find the veb */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
				break;
		if (i >= I40E_MAX_VEB) {
			dev_info(&pf->pdev->dev,
				 "del relay: relay %d not found\n", veb_seid);
			goto command_write_done;
		}

		dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
		i40e_veb_release(pf->veb[i]);
	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
		i40e_status ret;
		u16 vid;
		unsigned int v;

		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev,
				 "add pvid: bad command string, cnt=%d\n", cnt);
			goto command_write_done;
		}

		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
				 vsi_seid);
			goto command_write_done;
		}

		vid = v;
		ret = i40e_vsi_add_pvid(vsi, vid);
		if (!ret)
			dev_info(&pf->pdev->dev,
				 "add pvid: %d added to VSI %d\n",
				 vid, vsi_seid);
		else
			dev_info(&pf->pdev->dev,
				 "add pvid: %d to VSI %d failed, ret=%d\n",
				 vid, vsi_seid, ret);

	} else if (strncmp(cmd_buf, "del pvid", 8) == 0) {

		cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev,
				 "del pvid: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		}

		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "del pvid: VSI %d not found\n", vsi_seid);
			goto command_write_done;
		}

		i40e_vsi_remove_pvid(vsi);
		dev_info(&pf->pdev->dev,
			 "del pvid: removed from VSI %d\n", vsi_seid);

	} else if (strncmp(cmd_buf, "dump", 4) == 0) {
		if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
			i40e_fetch_switch_configuration(pf, true);
		} else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
			cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
			if (cnt > 0)
				i40e_dbg_dump_vsi_seid(pf, vsi_seid);
			else
				i40e_dbg_dump_vsi_no_seid(pf);
		} else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
			cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
			if (cnt > 0)
				i40e_dbg_dump_veb_seid(pf, vsi_seid);
			else
				i40e_dbg_dump_veb_all(pf);
		} else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
			cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
			if (cnt > 0)
				i40e_dbg_dump_vf(pf, vf_id);
			else
				i40e_dbg_dump_vf_all(pf);
		} else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
			int ring_id, desc_n;

			if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
				cnt = sscanf(&cmd_buf[12], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, true);
			} else if (strncmp(&cmd_buf[10], "tx", 2) == 0) {
				cnt = sscanf(&cmd_buf[12], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, false);
			} else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
				i40e_dbg_dump_aq_desc(pf);
			} else {
				dev_info(&pf->pdev->dev,
					 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
				dev_info(&pf->pdev->dev,
					 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
				dev_info(&pf->pdev->dev, "dump desc aq\n");
			}
		} else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
			dev_info(&pf->pdev->dev,
				 "core reset count: %d\n", pf->corer_count);
			dev_info(&pf->pdev->dev,
				 "global reset count: %d\n", pf->globr_count);
			dev_info(&pf->pdev->dev,
				 "emp reset count: %d\n", pf->empr_count);
			dev_info(&pf->pdev->dev,
				 "pf reset count: %d\n", pf->pfr_count);
			dev_info(&pf->pdev->dev,
				 "pf tx sluggish count: %d\n",
				 pf->tx_sluggish_count);
		} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
			struct i40e_aqc_query_port_ets_config_resp *bw_data;
			struct i40e_dcbx_config *cfg =
						&pf->hw.local_dcbx_config;
			struct i40e_dcbx_config *r_cfg =
						&pf->hw.remote_dcbx_config;
			int i, ret;
			u16 switch_id;

			bw_data = kzalloc(sizeof(
				struct i40e_aqc_query_port_ets_config_resp),
					  GFP_KERNEL);
			if (!bw_data) {
				ret = -ENOMEM;
				goto command_write_done;
			}

			vsi = pf->vsi[pf->lan_vsi];
			switch_id =
				le16_to_cpu(vsi->info.switch_id) &
					    I40E_AQ_VSI_SW_ID_MASK;

			ret = i40e_aq_query_port_ets_config(&pf->hw,
							    switch_id,
							    bw_data, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Query Port ETS Config AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(bw_data);
				bw_data = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev,
				 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
				 bw_data->tc_valid_bits,
				 bw_data->tc_strict_priority_bits,
				 le16_to_cpu(bw_data->tc_bw_max[0]),
				 le16_to_cpu(bw_data->tc_bw_max[1]));
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
					 bw_data->tc_bw_share_credits[i],
					 le16_to_cpu(bw_data->tc_bw_limits[i]));
			}

			kfree(bw_data);
			bw_data = NULL;

			dev_info(&pf->pdev->dev,
				 "port dcbx_mode=%d\n", cfg->dcbx_mode);
			dev_info(&pf->pdev->dev,
				 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
				 cfg->etscfg.willing, cfg->etscfg.cbs,
				 cfg->etscfg.maxtcs);
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, cfg->etscfg.prioritytable[i],
					 cfg->etscfg.tcbwtable[i],
					 cfg->etscfg.tsatable[i]);
			}
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, cfg->etsrec.prioritytable[i],
					 cfg->etsrec.tcbwtable[i],
					 cfg->etsrec.tsatable[i]);
			}
			dev_info(&pf->pdev->dev,
				 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
				 cfg->pfc.willing, cfg->pfc.mbc,
				 cfg->pfc.pfccap, cfg->pfc.pfcenable);
			dev_info(&pf->pdev->dev,
				 "port app_table: num_apps=%d\n", cfg->numapps);
			for (i = 0; i < cfg->numapps; i++) {
				dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
					 i, cfg->app[i].priority,
					 cfg->app[i].selector,
					 cfg->app[i].protocolid);
			}
			/* Peer TLV DCBX data */
			dev_info(&pf->pdev->dev,
				 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
				 r_cfg->etscfg.willing,
				 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, r_cfg->etscfg.prioritytable[i],
					 r_cfg->etscfg.tcbwtable[i],
					 r_cfg->etscfg.tsatable[i]);
			}
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, r_cfg->etsrec.prioritytable[i],
					 r_cfg->etsrec.tcbwtable[i],
					 r_cfg->etsrec.tsatable[i]);
			}
			dev_info(&pf->pdev->dev,
				 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
				 r_cfg->pfc.willing,
				 r_cfg->pfc.mbc,
				 r_cfg->pfc.pfccap,
				 r_cfg->pfc.pfcenable);
			dev_info(&pf->pdev->dev,
				 "remote port app_table: num_apps=%d\n",
				 r_cfg->numapps);
			for (i = 0; i < r_cfg->numapps; i++) {
				dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
					 i, r_cfg->app[i].priority,
					 r_cfg->app[i].selector,
					 r_cfg->app[i].protocolid);
			}
		} else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
			int cluster_id, table_id;
			int index, ret;
			u16 buff_len = 4096;
			u32 next_index;
			u8 next_table;
			u8 *buff;
			u16 rlen;

			cnt = sscanf(&cmd_buf[18], "%i %i %i",
				     &cluster_id, &table_id, &index);
			if (cnt != 3) {
				dev_info(&pf->pdev->dev,
					 "dump debug fwdata <cluster_id> <table_id> <index>\n");
				goto command_write_done;
			}

			dev_info(&pf->pdev->dev,
				 "AQ debug dump fwdata params %x %x %x %x\n",
				 cluster_id, table_id, index, buff_len);
			buff = kzalloc(buff_len, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
						 index, buff_len, buff, &rlen,
						 &next_table, &next_index,
						 NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "debug dump fwdata AQ Failed %d 0x%x\n",
					 ret, pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev,
				 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
				 rlen, next_table, next_index);
			print_hex_dump(KERN_INFO, "AQ buffer WB: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, rlen, true);
			kfree(buff);
			buff = NULL;
		} else {
			dev_info(&pf->pdev->dev,
				 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
			dev_info(&pf->pdev->dev, "dump switch\n");
			dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
			dev_info(&pf->pdev->dev, "dump reset stats\n");
			dev_info(&pf->pdev->dev, "dump port\n");
			dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
			dev_info(&pf->pdev->dev,
				 "dump debug fwdata <cluster_id> <table_id> <index>\n");
		}
	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
		dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));

	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
		dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
		i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));

	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
		dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
		i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));

	} else if (strncmp(cmd_buf, "read", 4) == 0) {
		u32 address;
		u32 value;

		cnt = sscanf(&cmd_buf[4], "%i", &address);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "read <reg>\n");
			goto command_write_done;
		}

		/* check the range on address */
		if (address > (pf->ioremap_len - sizeof(u32))) {
			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
				 address,
				 (unsigned long int)(pf->ioremap_len - sizeof(u32)));
			goto command_write_done;
		}

		value = rd32(&pf->hw, address);
		dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
			 address, value);

	} else if (strncmp(cmd_buf, "write", 5) == 0) {
		u32 address, value;

		cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev, "write <reg> <value>\n");
			goto command_write_done;
		}

		/* check the range on address */
		if (address > (pf->ioremap_len - sizeof(u32))) {
			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
				 address,
				 (unsigned long int)(pf->ioremap_len - sizeof(u32)));
			goto command_write_done;
		}
		wr32(&pf->hw, address, value);
		value = rd32(&pf->hw, address);
		dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
			 address, value);
	} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
		if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
			cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
			if (cnt == 0) {
				int i;

				for (i = 0; i < pf->num_alloc_vsi; i++)
					i40e_vsi_reset_stats(pf->vsi[i]);
				dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
			} else if (cnt == 1) {
				vsi = i40e_dbg_find_vsi(pf, vsi_seid);
				if (!vsi) {
					dev_info(&pf->pdev->dev,
						 "clear_stats vsi: bad vsi %d\n",
						 vsi_seid);
					goto command_write_done;
				}
				i40e_vsi_reset_stats(vsi);
				dev_info(&pf->pdev->dev,
					 "vsi clear stats called for vsi %d\n",
					 vsi_seid);
			} else {
				dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
			}
		} else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
			if (pf->hw.partition_id == 1) {
				i40e_pf_reset_stats(pf);
				dev_info(&pf->pdev->dev, "port stats cleared\n");
			} else {
				dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
			}
		} else {
			dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
		}
	} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
		struct i40e_aq_desc *desc;
		i40e_status ret;

		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
		if (!desc)
			goto command_write_done;
		cnt = sscanf(&cmd_buf[11],
			     "%hi %hi %hi %hi %i %i %i %i %i %i",
			     &desc->flags,
			     &desc->opcode, &desc->datalen, &desc->retval,
			     &desc->cookie_high, &desc->cookie_low,
			     &desc->params.internal.param0,
			     &desc->params.internal.param1,
			     &desc->params.internal.param2,
			     &desc->params.internal.param3);
		if (cnt != 10) {
			dev_info(&pf->pdev->dev,
				 "send aq_cmd: bad command string, cnt=%d\n",
				 cnt);
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
		if (!ret) {
			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
		} else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x AQ Error: %d\n",
				 desc->opcode, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x Status: %d\n",
				 desc->opcode, ret);
		}
		dev_info(&pf->pdev->dev,
			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 desc->flags, desc->opcode, desc->datalen, desc->retval,
			 desc->cookie_high, desc->cookie_low,
			 desc->params.internal.param0,
			 desc->params.internal.param1,
			 desc->params.internal.param2,
			 desc->params.internal.param3);
		kfree(desc);
		desc = NULL;
	} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
		struct i40e_aq_desc *desc;
		i40e_status ret;
		u16 buffer_len;
		u8 *buff;

		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
		if (!desc)
			goto command_write_done;
		cnt = sscanf(&cmd_buf[20],
			     "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
			     &desc->flags,
			     &desc->opcode, &desc->datalen, &desc->retval,
			     &desc->cookie_high, &desc->cookie_low,
			     &desc->params.internal.param0,
			     &desc->params.internal.param1,
			     &desc->params.internal.param2,
			     &desc->params.internal.param3,
			     &buffer_len);
		if (cnt != 11) {
			dev_info(&pf->pdev->dev,
				 "send indirect aq_cmd: bad command string, cnt=%d\n",
				 cnt);
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		/* Just stub a buffer big enough in case user messed up */
		if (buffer_len == 0)
			buffer_len = 1280;

		buff = kzalloc(buffer_len, GFP_KERNEL);
		if (!buff) {
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		ret = i40e_asq_send_command(&pf->hw, desc, buff,
					    buffer_len, NULL);
		if (!ret) {
			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
		} else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x AQ Error: %d\n",
				 desc->opcode, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x Status: %d\n",
				 desc->opcode, ret);
		}
		dev_info(&pf->pdev->dev,
			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 desc->flags, desc->opcode, desc->datalen, desc->retval,
			 desc->cookie_high, desc->cookie_low,
			 desc->params.internal.param0,
			 desc->params.internal.param1,
			 desc->params.internal.param2,
			 desc->params.internal.param3);
		print_hex_dump(KERN_INFO, "AQ buffer WB: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buff, buffer_len, true);
		kfree(buff);
		buff = NULL;
		kfree(desc);
		desc = NULL;
	} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
		dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
			 i40e_get_current_fd_count(pf));
	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
			int ret;

			ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Stop LLDP AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
						pf->hw.mac.addr,
						ETH_P_LLDP, 0,
						pf->vsi[pf->lan_vsi]->seid,
						0, true, NULL, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
					 __func__, pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
#ifdef CONFIG_I40E_DCB
			pf->dcbx_cap = DCB_CAP_DCBX_HOST |
				       DCB_CAP_DCBX_VER_IEEE;
#endif /* CONFIG_I40E_DCB */
		} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
			int ret;

			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
						pf->hw.mac.addr,
						ETH_P_LLDP, 0,
						pf->vsi[pf->lan_vsi]->seid,
						0, false, NULL, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
					 __func__, pf->hw.aq.asq_last_status);
				/* Continue and start FW LLDP anyways */
			}

			ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Start LLDP AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
#ifdef CONFIG_I40E_DCB
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;
#endif /* CONFIG_I40E_DCB */
		} else if (strncmp(&cmd_buf[5], "get local", 9) == 0) {
			u16 llen, rlen;
			int ret;
			u8 *buff;

			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
						   I40E_AQ_LLDP_MIB_LOCAL,
						   buff, I40E_LLDPDU_SIZE,
						   &llen, &rlen, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Get LLDP MIB (local) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
			buff = NULL;
		} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
			u16 llen, rlen;
			int ret;
			u8 *buff;

			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_get_lldp_mib(&pf->hw,
					I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
					I40E_AQ_LLDP_MIB_REMOTE,
					buff, I40E_LLDPDU_SIZE,
					&llen, &rlen, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
			buff = NULL;
		} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
			int ret;

			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								true, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
		} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
			int ret;

			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
		}
	} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
		u16 buffer_len, bytes;
		u16 module;
		u32 offset;
		u16 *buff;
		int ret;

		cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
			     &module, &offset, &buffer_len);
		if (cnt == 0) {
			module = 0;
			offset = 0;
			buffer_len = 0;
		} else if (cnt == 1) {
			offset = 0;
			buffer_len = 0;
		} else if (cnt == 2) {
			buffer_len = 0;
		} else if (cnt > 3) {
			dev_info(&pf->pdev->dev,
				 "nvm read: bad command string, cnt=%d\n", cnt);
			goto command_write_done;
		}

		/* set the max length */
		buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);

		bytes = 2 * buffer_len;

		/* read at least 1k bytes, no more than 4kB */
		bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
		buff = kzalloc(bytes, GFP_KERNEL);
		if (!buff)
			goto command_write_done;

		ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
				 ret, pf->hw.aq.asq_last_status);
			kfree(buff);
			goto command_write_done;
		}

		ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
				       bytes, (u8 *)buff, true, NULL);
		i40e_release_nvm(&pf->hw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Read NVM AQ failed err=%d status=0x%x\n",
				 ret, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "Read NVM module=0x%x offset=0x%x words=%d\n",
				 module, offset, buffer_len);
			if (bytes)
				print_hex_dump(KERN_INFO, "NVM Dump: ",
					       DUMP_PREFIX_OFFSET, 16, 2,
					       buff, bytes, true);
		}
		kfree(buff);
		buff = NULL;
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
		dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
		dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
		dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
		dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " dump switch\n");
		dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
		dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc aq\n");
		dev_info(&pf->pdev->dev, " dump reset stats\n");
		dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
		dev_info(&pf->pdev->dev, " read <reg>\n");
		dev_info(&pf->pdev->dev, " write <reg> <value>\n");
		dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
		dev_info(&pf->pdev->dev, " clear_stats port\n");
		dev_info(&pf->pdev->dev, " pfr\n");
		dev_info(&pf->pdev->dev, " corer\n");
		dev_info(&pf->pdev->dev, " globr\n");
		dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
		dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
		dev_info(&pf->pdev->dev, " fd current cnt\n");
		dev_info(&pf->pdev->dev, " lldp start\n");
		dev_info(&pf->pdev->dev, " lldp stop\n");
		dev_info(&pf->pdev->dev, " lldp get local\n");
		dev_info(&pf->pdev->dev, " lldp get remote\n");
		dev_info(&pf->pdev->dev, " lldp event on\n");
		dev_info(&pf->pdev->dev, " lldp event off\n");
		dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
	}

command_write_done:
	kfree(cmd_buf);
	cmd_buf = NULL;
	return count;
}

static const struct file_operations i40e_dbg_command_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_command_read,
	.write = i40e_dbg_command_write,
};

/**************************************************************
 * netdev_ops
 * The netdev_ops entry in debugfs is for giving the driver commands
 * to be executed from the netdev operations.
 **************************************************************/
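
/*
 * Illustrative usage sketch (not part of the driver), mirroring the
 * "command" file above: assuming debugfs is mounted at /sys/kernel/debug
 * and <pci-id> stands in for the PF's PCI device name,
 *
 *   echo "napi <vsi_seid>" > /sys/kernel/debug/i40e/<pci-id>/netdev_ops
 *
 * schedules NAPI on every q_vector of that VSI, while
 * "change_mtu <vsi_seid> <mtu>" and "set_rx_mode <vsi_seid>" invoke the
 * corresponding netdev_ops callbacks under the RTNL lock (if it can be
 * taken without blocking).
 */
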
static char i40e_dbg_netdev_ops_buf[256] = "";

/**
 * i40e_dbg_netdev_ops_read - read for netdev_ops datum
 * @filp: the opened file
 * @buffer: where to write the data for the user to read
 * @count: the size of the user's buffer
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
					size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	int buf_size = 256;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;
	if (count < buf_size)
		return -ENOSPC;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	len = snprintf(buf, buf_size, "%s: %s\n",
		       pf->vsi[pf->lan_vsi]->netdev->name,
		       i40e_dbg_netdev_ops_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);

	if (bytes_not_copied)
		return -EFAULT;

	*ppos = len;
	return len;
}

/**
 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
					 const char __user *buffer,
					 size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	struct i40e_vsi *vsi;
	char *buf_tmp;
	int vsi_seid;
	int i, cnt;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(i40e_dbg_netdev_ops_buf))
		return -ENOSPC;

	memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
	bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
					  buffer, count);
	if (bytes_not_copied)
		return -EFAULT;
	i40e_dbg_netdev_ops_buf[count] = '\0';

	buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
	if (buf_tmp) {
		*buf_tmp = '\0';
		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
	}

	if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
		int mtu;

		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
			     &vsi_seid, &mtu);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "change_mtu: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
								mtu);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "change_mtu called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "set_rx_mode: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "set_rx_mode called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
				 vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
				 vsi_seid);
		} else {
			for (i = 0; i < vsi->num_q_vectors; i++)
				napi_schedule(&vsi->q_vectors[i]->napi);
			dev_info(&pf->pdev->dev, "napi called\n");
		}
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
			 i40e_dbg_netdev_ops_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
		dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
	}
netdev_ops_write_done:
	return count;
}

static const struct file_operations i40e_dbg_netdev_ops_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_netdev_ops_read,
	.write = i40e_dbg_netdev_ops_write,
};
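
/*
 * Rough sketch of the resulting debugfs layout, assuming debugfs is
 * mounted at /sys/kernel/debug (the directory names actually come from
 * i40e_driver_name and pci_name() in the init helpers below):
 *
 *   /sys/kernel/debug/i40e/            created in i40e_dbg_init()
 *   /sys/kernel/debug/i40e/<pci-id>/   created in i40e_dbg_pf_init()
 *       command                        i40e_dbg_command_fops
 *       netdev_ops                     i40e_dbg_netdev_ops_fops
 */
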
/**
 * i40e_dbg_pf_init - setup the debugfs directory for the PF
 * @pf: the PF that is starting up
 **/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
	const char *name = pci_name(pf->pdev);

	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);

	debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_command_fops);

	debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_netdev_ops_fops);
}

/**
 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
 * @pf: the PF that is stopping
 **/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
	debugfs_remove_recursive(pf->i40e_dbg_pf);
	pf->i40e_dbg_pf = NULL;
}

/**
 * i40e_dbg_init - start up debugfs for the driver
 **/
void i40e_dbg_init(void)
{
	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
	if (!i40e_dbg_root)
		pr_info("init of debugfs failed\n");
}

/**
 * i40e_dbg_exit - clean out the driver's debugfs entries
 **/
void i40e_dbg_exit(void)
{
	debugfs_remove_recursive(i40e_dbg_root);
	i40e_dbg_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */