// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifdef CONFIG_DEBUG_FS

#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include "i40e.h"
#include "i40e_virtchnl_pf.h"

static struct dentry *i40e_dbg_root;

enum ring_type {
	RING_TYPE_RX,
	RING_TYPE_TX,
	RING_TYPE_XDP
};

/**
 * i40e_dbg_find_vsi - searches for the vsi with the given seid
 * @pf: the PF structure to search for the vsi
 * @seid: seid of the vsi it is searching for
 **/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
	if (seid < 0) {
		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);

		return NULL;
	}

	return i40e_pf_get_vsi_by_seid(pf, seid);
}

/**************************************************************
 * command
 * The command entry in debugfs is for giving the driver commands
 * to be executed - these may be for changing the internal switch
 * setup, adding or removing filters, or other things. Many of
 * these will be useful for some forms of unit testing.
 **************************************************************/
static char i40e_dbg_command_buf[256] = "";

/**
 * i40e_dbg_command_read - read for command datum
 * @filp: the opened file
 * @buffer: where to write the data for the user to read
 * @count: the size of the user's buffer
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
				     size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	struct i40e_vsi *main_vsi;
	int bytes_not_copied;
	int buf_size = 256;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;
	if (count < buf_size)
		return -ENOSPC;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	main_vsi = i40e_pf_get_main_vsi(pf);
	len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
		       i40e_dbg_command_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);

	if (bytes_not_copied)
		return -EFAULT;

	*ppos = len;
	return len;
}

static char *i40e_filter_state_string[] = {
	"INVALID",
	"NEW",
	"ACTIVE",
	"FAILED",
	"REMOVE",
};

/**
 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
 * @pf: the i40e_pf created in command write
 * @seid: the seid the user put in
 **/
static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
{
	struct rtnl_link_stats64 *nstat;
	struct i40e_mac_filter *f;
	struct i40e_vsi *vsi;
	int i, bkt;

	vsi = i40e_dbg_find_vsi(pf, seid);
	if (!vsi) {
		dev_info(&pf->pdev->dev,
			 "dump %d: seid not found\n", seid);
		return;
	}
	dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
	if (vsi->netdev) {
		struct net_device *nd = vsi->netdev;

		dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
			 nd->name, nd->state, nd->flags);
		dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
			 (unsigned long int)nd->features);
		dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
			 (unsigned long int)nd->hw_features);
		dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
			 (unsigned long int)nd->vlan_features);
	}
	dev_info(&pf->pdev->dev,
		 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
		 vsi->flags, vsi->netdev_registered,
		 vsi->current_netdev_flags);
	for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
		dev_info(&pf->pdev->dev,
			 " state[%d] = %08lx\n",
			 i, vsi->state[i]);
	if (vsi->type == I40E_VSI_MAIN)
		dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
			 pf->hw.mac.addr,
			 pf->hw.mac.port_addr);
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		dev_info(&pf->pdev->dev,
			 " mac_filter_hash: %pM vid=%d, state %s\n",
			 f->macaddr, f->vlan,
			 i40e_filter_state_string[f->state]);
	}
	dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
		 vsi->active_filters, vsi->promisc_threshold,
		 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
		  "ON" : "OFF"));
	nstat = i40e_get_vsi_stats_struct(vsi);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
		 (unsigned long int)nstat->rx_packets,
		 (unsigned long int)nstat->rx_bytes,
		 (unsigned long int)nstat->rx_errors,
		 (unsigned long int)nstat->rx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
		 (unsigned long int)nstat->tx_packets,
		 (unsigned long int)nstat->tx_bytes,
		 (unsigned long int)nstat->tx_errors,
		 (unsigned long int)nstat->tx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats: multicast = %lu, collisions = %lu\n",
		 (unsigned long int)nstat->multicast,
		 (unsigned long int)nstat->collisions);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
		 (unsigned long int)nstat->rx_length_errors,
		 (unsigned long int)nstat->rx_over_errors,
		 (unsigned long int)nstat->rx_crc_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
		 (unsigned long int)nstat->rx_frame_errors,
		 (unsigned long int)nstat->rx_fifo_errors,
		 (unsigned long int)nstat->rx_missed_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
		 (unsigned long int)nstat->tx_aborted_errors,
		 (unsigned long int)nstat->tx_carrier_errors,
		 (unsigned long int)nstat->tx_fifo_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
		 (unsigned long int)nstat->tx_heartbeat_errors,
		 (unsigned long int)nstat->tx_window_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
		 (unsigned long int)nstat->rx_compressed,
		 (unsigned long int)nstat->tx_compressed);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_packets,
		 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
		 (unsigned long int)vsi->net_stats_offsets.rx_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.tx_packets,
		 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
		 (unsigned long int)vsi->net_stats_offsets.tx_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.multicast,
		 (unsigned long int)vsi->net_stats_offsets.collisions);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
		 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
	dev_info(&pf->pdev->dev,
		 " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
		 vsi->tx_restart, vsi->tx_busy,
		 vsi->rx_buf_failed, vsi->rx_page_failed);
	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);

		if (!rx_ring)
			continue;

		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
			 i, *rx_ring->state,
			 rx_ring->queue_index,
			 rx_ring->reg_idx);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_buf_len = %d\n",
			 i, rx_ring->rx_buf_len);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
			 i,
			 rx_ring->next_to_use,
			 rx_ring->next_to_clean,
			 rx_ring->ring_active);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
			 i, rx_ring->stats.packets,
			 rx_ring->stats.bytes,
			 rx_ring->rx_stats.non_eop_descs);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
			 i,
			 rx_ring->rx_stats.alloc_page_failed,
			 rx_ring->rx_stats.alloc_buff_failed);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
			 i,
			 rx_ring->rx_stats.page_reuse_count);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: size = %i\n",
			 i, rx_ring->size);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: itr_setting = %d (%s)\n",
			 i, rx_ring->itr_setting,
			 ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
	}
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);

		if (!tx_ring)
			continue;

		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
			 i, *tx_ring->state,
			 tx_ring->queue_index,
			 tx_ring->reg_idx);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
			 i,
			 tx_ring->next_to_use,
			 tx_ring->next_to_clean,
			 tx_ring->ring_active);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
			 i, tx_ring->stats.packets,
			 tx_ring->stats.bytes,
			 tx_ring->tx_stats.restart_queue);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
			 i,
			 tx_ring->tx_stats.tx_busy,
			 tx_ring->tx_stats.tx_done_old,
			 tx_ring->tx_stats.tx_stopped);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: size = %i\n",
			 i, tx_ring->size);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: DCB tc = %d\n",
			 i, tx_ring->dcb_tc);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: itr_setting = %d (%s)\n",
			 i, tx_ring->itr_setting,
			 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
	}
	if (i40e_enabled_xdp_vsi(vsi)) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);

			if (!xdp_ring)
				continue;

			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
				 i, *xdp_ring->state,
				 xdp_ring->queue_index,
				 xdp_ring->reg_idx);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
				 i,
				 xdp_ring->next_to_use,
				 xdp_ring->next_to_clean,
				 xdp_ring->ring_active);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
				 i, xdp_ring->stats.packets,
				 xdp_ring->stats.bytes,
				 xdp_ring->tx_stats.restart_queue);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
				 i,
				 xdp_ring->tx_stats.tx_busy,
				 xdp_ring->tx_stats.tx_done_old);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: size = %i\n",
				 i, xdp_ring->size);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: DCB tc = %d\n",
				 i, xdp_ring->dcb_tc);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: itr_setting = %d (%s)\n",
				 i, xdp_ring->itr_setting,
				 ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
				 "dynamic" : "fixed");
		}
	}
	rcu_read_unlock();
	dev_info(&pf->pdev->dev,
		 " work_limit = %d\n",
		 vsi->work_limit);
	dev_info(&pf->pdev->dev,
		 " max_frame = %d, rx_buf_len = %d dtype = %d\n",
		 vsi->max_frame, vsi->rx_buf_len, 0);
	dev_info(&pf->pdev->dev,
		 " num_q_vectors = %i, base_vector = %i\n",
		 vsi->num_q_vectors, vsi->base_vector);
	dev_info(&pf->pdev->dev,
		 " seid = %d, id = %d, uplink_seid = %d\n",
		 vsi->seid, vsi->id, vsi->uplink_seid);
	dev_info(&pf->pdev->dev,
		 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
		 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
		 vsi->num_rx_desc);
	dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
	if (vsi->type == I40E_VSI_SRIOV)
		dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
	dev_info(&pf->pdev->dev,
		 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
		 vsi->info.valid_sections, vsi->info.switch_id);
	dev_info(&pf->pdev->dev,
		 " info: sw_reserved[] = 0x%02x 0x%02x\n",
		 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
	dev_info(&pf->pdev->dev,
		 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
		 vsi->info.sec_flags, vsi->info.sec_reserved);
	dev_info(&pf->pdev->dev,
		 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
		 vsi->info.pvid, vsi->info.fcoe_pvid,
		 vsi->info.port_vlan_flags);
	dev_info(&pf->pdev->dev,
		 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
		 vsi->info.pvlan_reserved[2]);
	dev_info(&pf->pdev->dev,
		 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
		 vsi->info.ingress_table, vsi->info.egress_table);
	dev_info(&pf->pdev->dev,
		 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
		 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
		 vsi->info.cas_pv_reserved);
	dev_info(&pf->pdev->dev,
		 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
		 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
		 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
		 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
	dev_info(&pf->pdev->dev,
		 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
		 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
		 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
		 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
	dev_info(&pf->pdev->dev,
		 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
		 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
		 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
		 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
	dev_info(&pf->pdev->dev,
		 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.queueing_opt_flags,
		 vsi->info.queueing_opt_reserved[0],
		 vsi->info.queueing_opt_reserved[1],
		 vsi->info.queueing_opt_reserved[2]);
	dev_info(&pf->pdev->dev,
		 " info: up_enable_bits = 0x%02x\n",
		 vsi->info.up_enable_bits);
	dev_info(&pf->pdev->dev,
		 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
		 vsi->info.sched_reserved, vsi->info.outer_up_table);
	dev_info(&pf->pdev->dev,
		 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
		 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
		 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
		 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
		 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
	dev_info(&pf->pdev->dev,
		 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
		 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
		 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
		 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
	dev_info(&pf->pdev->dev,
		 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
		 vsi->info.stat_counter_idx, vsi->info.sched_id);
	dev_info(&pf->pdev->dev,
		 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
		 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
		 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
		 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
		 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
		 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
	dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
	dev_info(&pf->pdev->dev,
		 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
		 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		dev_info(&pf->pdev->dev,
			 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
			 i, vsi->tc_config.tc_info[i].qoffset,
			 vsi->tc_config.tc_info[i].qcount,
			 vsi->tc_config.tc_info[i].netdev_tc);
	}
	dev_info(&pf->pdev->dev,
		 " bw: bw_limit = %d, bw_max_quanta = %d\n",
		 vsi->bw_limit, vsi->bw_max_quanta);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		dev_info(&pf->pdev->dev,
			 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
			 i, vsi->bw_ets_share_credits[i],
			 vsi->bw_ets_limit_credits[i],
			 vsi->bw_ets_max_quanta[i]);
	}
}

/**
 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
{
	struct i40e_adminq_ring *ring;
	struct i40e_hw *hw = &pf->hw;
	char hdr[32];
	int i;

	snprintf(hdr, sizeof(hdr), "%s %s: ",
		 dev_driver_string(&pf->pdev->dev),
		 dev_name(&pf->pdev->dev));

	/* first the send (command) ring, then the receive (event) ring */
	dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
	ring = &(hw->aq.asq);
	for (i = 0; i < ring->count; i++) {
		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);

		dev_info(&pf->pdev->dev,
			 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
			 i, d->flags, d->opcode, d->datalen, d->retval,
			 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
	}

	dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
	ring = &(hw->aq.arq);
	for (i = 0; i < ring->count; i++) {
		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);

		dev_info(&pf->pdev->dev,
			 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
			 i, d->flags, d->opcode, d->datalen, d->retval,
			 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
	}
}

/**
 * i40e_dbg_dump_desc - handles dump desc write into command datum
 * @cnt: number of arguments that the user supplied
 * @vsi_seid: vsi id entered by user
 * @ring_id: ring id entered by user
 * @desc_n: descriptor number entered by user
 * @pf: the i40e_pf created in command write
 * @type: enum describing whether ring is RX, TX or XDP
 **/
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
			       struct i40e_pf *pf, enum ring_type type)
{
	bool is_rx_ring = type == RING_TYPE_RX;
	struct i40e_tx_desc *txd;
	union i40e_rx_desc *rxd;
	struct i40e_ring *ring;
	struct i40e_vsi *vsi;
	int i;

	vsi = i40e_dbg_find_vsi(pf, vsi_seid);
	if (!vsi) {
		dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
		return;
	}
	if (vsi->type != I40E_VSI_MAIN &&
	    vsi->type != I40E_VSI_FDIR &&
	    vsi->type != I40E_VSI_VMDQ2) {
		dev_info(&pf->pdev->dev,
			 "vsi %d type %d descriptor rings not available\n",
			 vsi_seid, vsi->type);
		return;
	}
	if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
		dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
		return;
	}
	if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
		dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
		return;
	}
	if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
		dev_info(&pf->pdev->dev,
			 "descriptor rings have not been allocated for vsi %d\n",
			 vsi_seid);
		return;
	}

	switch (type) {
	case RING_TYPE_RX:
		ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
		break;
	case RING_TYPE_TX:
		ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
		break;
	case RING_TYPE_XDP:
		ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
		break;
	default:
		ring = NULL;
		break;
	}
	if (!ring)
		return;

	if (cnt == 2) {
		switch (type) {
		case RING_TYPE_RX:
			dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
			break;
		case RING_TYPE_TX:
			dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
			break;
		case RING_TYPE_XDP:
			dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
			break;
		}
		for (i = 0; i < ring->count; i++) {
			if (!is_rx_ring) {
				txd = I40E_TX_DESC(ring, i);
				dev_info(&pf->pdev->dev,
					 " d[%03x] = 0x%016llx 0x%016llx\n",
					 i, txd->buffer_addr,
					 txd->cmd_type_offset_bsz);
			} else {
				rxd = I40E_RX_DESC(ring, i);
				dev_info(&pf->pdev->dev,
					 " d[%03x] = 0x%016llx 0x%016llx\n",
					 i, rxd->read.pkt_addr,
					 rxd->read.hdr_addr);
			}
		}
	} else if (cnt == 3) {
		if (desc_n >= ring->count || desc_n < 0) {
			dev_info(&pf->pdev->dev,
				 "descriptor %d not found\n", desc_n);
			goto out;
		}
		if (!is_rx_ring) {
			txd = I40E_TX_DESC(ring, desc_n);
			dev_info(&pf->pdev->dev,
				 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
				 vsi_seid, ring_id, desc_n,
				 txd->buffer_addr, txd->cmd_type_offset_bsz);
		} else {
			rxd = I40E_RX_DESC(ring, desc_n);
			dev_info(&pf->pdev->dev,
				 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
				 vsi_seid, ring_id, desc_n,
				 rxd->read.pkt_addr, rxd->read.hdr_addr);
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
	}

out:
	kfree(ring);
}

/**
 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	int i;

	i40e_pf_for_each_vsi(pf, i, vsi)
		dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
}

/**
 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
 * @pf: the i40e_pf created in command write
 * @estats: the eth stats structure to be dumped
 **/
static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
				    struct i40e_eth_stats *estats)
{
	dev_info(&pf->pdev->dev, " ethstats:\n");
	dev_info(&pf->pdev->dev,
		 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
		 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
	dev_info(&pf->pdev->dev,
		 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
		 estats->rx_broadcast, estats->rx_discards);
	dev_info(&pf->pdev->dev,
		 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
		 estats->rx_unknown_protocol, estats->tx_bytes);
	dev_info(&pf->pdev->dev,
		 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
		 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
	dev_info(&pf->pdev->dev,
		 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
		 estats->tx_discards, estats->tx_errors);
}

/**
 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
 * @pf: the i40e_pf created in command write
 * @seid: the seid the user put in
 **/
static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
{
	struct i40e_veb *veb;

	veb = i40e_pf_get_veb_by_seid(pf, seid);
	if (!veb) {
		dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
		return;
	}
	dev_info(&pf->pdev->dev,
		 "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
		 veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
		 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	i40e_dbg_dump_eth_stats(pf, &veb->stats);
}

/**
 * i40e_dbg_dump_veb_all - dumps all known veb's stats
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
{
	struct i40e_veb *veb;
	int i;

	i40e_pf_for_each_veb(pf, i, veb)
		i40e_dbg_dump_veb_seid(pf, veb->seid);
}

/**
 * i40e_dbg_dump_vf - dump VF info
 * @pf: the i40e_pf created in command write
 * @vf_id: the vf_id from the user
 **/
static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vf *vf;
	struct i40e_vsi *vsi;

	if (!pf->num_alloc_vfs) {
		dev_info(&pf->pdev->dev, "no VFs allocated\n");
	} else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
		vf = &pf->vf[vf_id];
		vsi = pf->vsi[vf->lan_vsi_idx];
		dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
			 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
		dev_info(&pf->pdev->dev, " num MDD=%lld\n",
			 vf->num_mdd_events);
	} else {
		dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
	}
}

/**
 * i40e_dbg_dump_vf_all - dump VF info for all VFs
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
{
	int i;

	if (!pf->num_alloc_vfs)
		dev_info(&pf->pdev->dev, "no VFs enabled!\n");
	else
		for (i = 0; i < pf->num_alloc_vfs; i++)
			i40e_dbg_dump_vf(pf, i);
}

/**
 * i40e_dbg_command_write - write into command datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_command_write(struct file *filp,
				      const char __user *buffer,
				      size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	char *cmd_buf, *cmd_buf_tmp;
	int bytes_not_copied;
	struct i40e_vsi *vsi;
	int vsi_seid;
	int veb_seid;
	int vf_id;
	int cnt;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;

	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd_buf)
		return count;
	bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
	if (bytes_not_copied) {
		kfree(cmd_buf);
		return -EFAULT;
	}
	cmd_buf[count] = '\0';

	cmd_buf_tmp = strchr(cmd_buf, '\n');
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		count = cmd_buf_tmp - cmd_buf + 1;
	}

	if (strncmp(cmd_buf, "add vsi", 7) == 0) {
		vsi_seid = -1;
		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
		if (cnt == 0) {
			/* default to PF VSI */
			vsi = i40e_pf_get_main_vsi(pf);
			vsi_seid = vsi->seid;
		} else if (vsi_seid < 0) {
			dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
				 vsi_seid);
			goto command_write_done;
		}

		/* By default we are in VEPA mode, if this is the first VF/VMDq
		 * VSI to be added switch to VEB mode.
		 */
		if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
			set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}

		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
		if (vsi)
			dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
				 vsi->seid, vsi->uplink_seid);
		else
			dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);

	} else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev,
				 "del vsi: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
				 vsi_seid);
			goto command_write_done;
		}

		dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
		i40e_vsi_release(vsi);

	} else if (strncmp(cmd_buf, "add relay", 9) == 0) {
		struct i40e_veb *veb;
		u8 enabled_tc = 0x1;
		int uplink_seid;

		cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
		if (cnt == 0) {
			uplink_seid = 0;
			vsi_seid = 0;
		} else if (cnt != 2) {
			dev_info(&pf->pdev->dev,
				 "add relay: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		} else if (uplink_seid < 0) {
			dev_info(&pf->pdev->dev,
				 "add relay %d: bad uplink seid\n",
				 uplink_seid);
			goto command_write_done;
		}

		if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
			dev_info(&pf->pdev->dev,
				 "add relay: relay uplink %d not found\n",
				 uplink_seid);
			goto command_write_done;
		} else if (uplink_seid) {
			vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
			if (!vsi) {
				dev_info(&pf->pdev->dev,
					 "add relay: VSI %d not found\n",
					 vsi_seid);
				goto command_write_done;
			}
			enabled_tc = vsi->tc_config.enabled_tc;
		} else if (vsi_seid) {
			dev_info(&pf->pdev->dev,
				 "add relay: VSI must be 0 for floating relay\n");
			goto command_write_done;
		}

		veb = i40e_veb_setup(pf, uplink_seid, vsi_seid, enabled_tc);
		if (veb)
			dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
		else
			dev_info(&pf->pdev->dev, "add relay failed\n");

	} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
		struct i40e_veb *veb;
		int i;

		cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev,
				 "del relay: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		} else if (veb_seid < 0) {
			dev_info(&pf->pdev->dev,
				 "del relay %d: bad relay seid\n", veb_seid);
			goto command_write_done;
		}

		/* find the veb */
		i40e_pf_for_each_veb(pf, i, veb)
			if (veb->seid == veb_seid)
				break;

		if (i >= I40E_MAX_VEB) {
			dev_info(&pf->pdev->dev,
				 "del relay: relay %d not found\n", veb_seid);
			goto command_write_done;
		}

		dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
		i40e_veb_release(veb);
	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
		unsigned int v;
		int ret;
		u16 vid;

		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev,
				 "add pvid: bad command string, cnt=%d\n", cnt);
			goto command_write_done;
		}

		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
				 vsi_seid);
			goto command_write_done;
		}

		vid = v;
		ret = i40e_vsi_add_pvid(vsi, vid);
		if (!ret)
			dev_info(&pf->pdev->dev,
				 "add pvid: %d added to VSI %d\n",
				 vid, vsi_seid);
		else
			dev_info(&pf->pdev->dev,
				 "add pvid: %d to VSI %d failed, ret=%d\n",
				 vid, vsi_seid, ret);

	} else if (strncmp(cmd_buf, "del pvid", 8) == 0) {

		cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev,
				 "del pvid: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		}

		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "del pvid: VSI %d not found\n", vsi_seid);
			goto command_write_done;
		}

		i40e_vsi_remove_pvid(vsi);
		dev_info(&pf->pdev->dev,
			 "del pvid: removed from VSI %d\n", vsi_seid);

	} else if (strncmp(cmd_buf, "dump", 4) == 0) {
		if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
			i40e_fetch_switch_configuration(pf, true);
		} else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
			cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
			if (cnt > 0)
				i40e_dbg_dump_vsi_seid(pf, vsi_seid);
			else
				i40e_dbg_dump_vsi_no_seid(pf);
		} else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
			cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
			if (cnt > 0)
				i40e_dbg_dump_veb_seid(pf, vsi_seid);
			else
				i40e_dbg_dump_veb_all(pf);
		} else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
			cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
			if (cnt > 0)
				i40e_dbg_dump_vf(pf, vf_id);
			else
				i40e_dbg_dump_vf_all(pf);
		} else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
			int ring_id, desc_n;

			if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
				cnt = sscanf(&cmd_buf[12], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, RING_TYPE_RX);
			} else if (strncmp(&cmd_buf[10], "tx", 2) == 0) {
				cnt = sscanf(&cmd_buf[12], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, RING_TYPE_TX);
			} else if (strncmp(&cmd_buf[10], "xdp", 3) == 0) {
				cnt = sscanf(&cmd_buf[13], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, RING_TYPE_XDP);
			} else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
				i40e_dbg_dump_aq_desc(pf);
			} else {
				dev_info(&pf->pdev->dev,
					 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
				dev_info(&pf->pdev->dev,
					 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
				dev_info(&pf->pdev->dev,
					 "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
				dev_info(&pf->pdev->dev, "dump desc aq\n");
			}
		} else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
			dev_info(&pf->pdev->dev,
				 "core reset count: %d\n", pf->corer_count);
			dev_info(&pf->pdev->dev,
				 "global reset count: %d\n", pf->globr_count);
			dev_info(&pf->pdev->dev,
				 "emp reset count: %d\n", pf->empr_count);
			dev_info(&pf->pdev->dev,
				 "pf reset count: %d\n", pf->pfr_count);
		} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
			struct i40e_aqc_query_port_ets_config_resp *bw_data;
			struct i40e_dcbx_config *cfg =
				&pf->hw.local_dcbx_config;
			struct i40e_dcbx_config *r_cfg =
				&pf->hw.remote_dcbx_config;
			int i, ret;
			u16 switch_id;

			bw_data = kzalloc(sizeof(
				struct i40e_aqc_query_port_ets_config_resp),
				GFP_KERNEL);
			if (!bw_data) {
				ret = -ENOMEM;
				goto command_write_done;
			}

			vsi = i40e_pf_get_main_vsi(pf);
			switch_id =
				le16_to_cpu(vsi->info.switch_id) &
				I40E_AQ_VSI_SW_ID_MASK;

			ret = i40e_aq_query_port_ets_config(&pf->hw,
							    switch_id,
							    bw_data, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Query Port ETS Config AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(bw_data);
				bw_data = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev,
				 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
				 bw_data->tc_valid_bits,
				 bw_data->tc_strict_priority_bits,
				 le16_to_cpu(bw_data->tc_bw_max[0]),
				 le16_to_cpu(bw_data->tc_bw_max[1]));
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
					 bw_data->tc_bw_share_credits[i],
					 le16_to_cpu(bw_data->tc_bw_limits[i]));
			}

			kfree(bw_data);
			bw_data = NULL;

			dev_info(&pf->pdev->dev,
				 "port dcbx_mode=%d\n", cfg->dcbx_mode);
			dev_info(&pf->pdev->dev,
				 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
				 cfg->etscfg.willing, cfg->etscfg.cbs,
				 cfg->etscfg.maxtcs);
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, cfg->etscfg.prioritytable[i],
					 cfg->etscfg.tcbwtable[i],
					 cfg->etscfg.tsatable[i]);
			}
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, cfg->etsrec.prioritytable[i],
					 cfg->etsrec.tcbwtable[i],
					 cfg->etsrec.tsatable[i]);
			}
			dev_info(&pf->pdev->dev,
				 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
				 cfg->pfc.willing, cfg->pfc.mbc,
				 cfg->pfc.pfccap, cfg->pfc.pfcenable);
			dev_info(&pf->pdev->dev,
				 "port app_table: num_apps=%d\n", cfg->numapps);
			for (i = 0; i < cfg->numapps; i++) {
				dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
					 i, cfg->app[i].priority,
					 cfg->app[i].selector,
					 cfg->app[i].protocolid);
			}
			/* Peer TLV DCBX data */
			dev_info(&pf->pdev->dev,
				 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
				 r_cfg->etscfg.willing,
				 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, r_cfg->etscfg.prioritytable[i],
					 r_cfg->etscfg.tcbwtable[i],
					 r_cfg->etscfg.tsatable[i]);
			}
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, r_cfg->etsrec.prioritytable[i],
					 r_cfg->etsrec.tcbwtable[i],
					 r_cfg->etsrec.tsatable[i]);
			}
			dev_info(&pf->pdev->dev,
				 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
				 r_cfg->pfc.willing,
				 r_cfg->pfc.mbc,
				 r_cfg->pfc.pfccap,
				 r_cfg->pfc.pfcenable);
			dev_info(&pf->pdev->dev,
				 "remote port app_table: num_apps=%d\n",
				 r_cfg->numapps);
			for (i = 0; i < r_cfg->numapps; i++) {
				dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
					 i, r_cfg->app[i].priority,
					 r_cfg->app[i].selector,
					 r_cfg->app[i].protocolid);
			}
		} else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
			int cluster_id, table_id;
			int index, ret;
			u16 buff_len = 4096;
			u32 next_index;
			u8 next_table;
			u8 *buff;
			u16 rlen;

			cnt = sscanf(&cmd_buf[18], "%i %i %i",
				     &cluster_id, &table_id, &index);
			if (cnt != 3) {
				dev_info(&pf->pdev->dev,
					 "dump debug fwdata <cluster_id> <table_id> <index>\n");
				goto command_write_done;
			}

			dev_info(&pf->pdev->dev,
				 "AQ debug dump fwdata params %x %x %x %x\n",
				 cluster_id, table_id, index, buff_len);
			buff = kzalloc(buff_len, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
						 index, buff_len, buff, &rlen,
						 &next_table, &next_index,
						 NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "debug dump fwdata AQ Failed %d 0x%x\n",
					 ret, pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev,
				 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
				 rlen, next_table, next_index);
			print_hex_dump(KERN_INFO, "AQ buffer WB: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, rlen, true);
			kfree(buff);
			buff = NULL;
		} else {
			dev_info(&pf->pdev->dev,
				 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
			dev_info(&pf->pdev->dev, "dump switch\n");
			dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
			dev_info(&pf->pdev->dev, "dump reset stats\n");
			dev_info(&pf->pdev->dev, "dump port\n");
			dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
			dev_info(&pf->pdev->dev,
				 "dump debug fwdata <cluster_id> <table_id> <index>\n");
		}
	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
		dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));

	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
		dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
		i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));

	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
		dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
		i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));

	} else if (strncmp(cmd_buf, "read", 4) == 0) {
		u32 address;
		u32 value;

		cnt = sscanf(&cmd_buf[4], "%i", &address);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "read <reg>\n");
			goto command_write_done;
		}

		/* check the range on address */
		if (address > (pf->ioremap_len - sizeof(u32))) {
			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
			goto command_write_done;
		}

		value = rd32(&pf->hw, address);
		dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
			 address, value);

	} else if (strncmp(cmd_buf, "write", 5) == 0) {
		u32 address, value;

		cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev, "write <reg> <value>\n");
			goto command_write_done;
		}

		/* check the range on address */
		if (address > (pf->ioremap_len - sizeof(u32))) {
			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
			goto command_write_done;
		}
		wr32(&pf->hw, address, value);
		value = rd32(&pf->hw, address);
		dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
			 address, value);
	} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
		if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
			cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
			if (cnt == 0) {
				int i;

				i40e_pf_for_each_vsi(pf, i, vsi)
					i40e_vsi_reset_stats(vsi);
				dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
			} else if (cnt == 1) {
				vsi = i40e_dbg_find_vsi(pf, vsi_seid);
				if (!vsi) {
					dev_info(&pf->pdev->dev,
						 "clear_stats vsi: bad vsi %d\n",
						 vsi_seid);
					goto command_write_done;
				}
				i40e_vsi_reset_stats(vsi);
				dev_info(&pf->pdev->dev,
					 "vsi clear stats called for vsi %d\n",
					 vsi_seid);
			} else {
				dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
			}
		} else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
			if (pf->hw.partition_id == 1) {
				i40e_pf_reset_stats(pf);
				dev_info(&pf->pdev->dev, "port stats cleared\n");
			} else {
				dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
			}
		} else {
			dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
		}
	} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
		struct i40e_aq_desc *desc;
		int ret;

		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
		if (!desc)
			goto command_write_done;
		cnt = sscanf(&cmd_buf[11],
			     "%hi %hi %hi %hi %i %i %i %i %i %i",
			     &desc->flags,
			     &desc->opcode, &desc->datalen, &desc->retval,
			     &desc->cookie_high, &desc->cookie_low,
			     &desc->params.internal.param0,
			     &desc->params.internal.param1,
			     &desc->params.internal.param2,
			     &desc->params.internal.param3);
		if (cnt != 10) {
			dev_info(&pf->pdev->dev,
				 "send aq_cmd: bad command string, cnt=%d\n",
				 cnt);
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
		if (!ret) {
			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
		} else if (ret == -EIO) {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x AQ Error: %d\n",
				 desc->opcode, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x Status: %d\n",
				 desc->opcode, ret);
		}
		dev_info(&pf->pdev->dev,
			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 desc->flags, desc->opcode, desc->datalen, desc->retval,
			 desc->cookie_high, desc->cookie_low,
			 desc->params.internal.param0,
			 desc->params.internal.param1,
			 desc->params.internal.param2,
			 desc->params.internal.param3);
		kfree(desc);
		desc = NULL;
	} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
		struct i40e_aq_desc *desc;
		u16 buffer_len;
		u8 *buff;
		int ret;

		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
		if (!desc)
			goto command_write_done;
		cnt = sscanf(&cmd_buf[20],
			     "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
			     &desc->flags,
			     &desc->opcode, &desc->datalen, &desc->retval,
			     &desc->cookie_high, &desc->cookie_low,
			     &desc->params.internal.param0,
			     &desc->params.internal.param1,
			     &desc->params.internal.param2,
			     &desc->params.internal.param3,
			     &buffer_len);
		if (cnt != 11) {
			dev_info(&pf->pdev->dev,
				 "send indirect aq_cmd: bad command string, cnt=%d\n",
				 cnt);
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		/* Just stub a buffer big enough in case user messed up */
		if (buffer_len == 0)
			buffer_len = 1280;

		buff = kzalloc(buffer_len, GFP_KERNEL);
		if (!buff) {
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		ret = i40e_asq_send_command(&pf->hw, desc, buff,
					    buffer_len, NULL);
		if (!ret) {
			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
		} else if (ret == -EIO) {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x AQ Error: %d\n",
				 desc->opcode, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x Status: %d\n",
				 desc->opcode, ret);
		}
		dev_info(&pf->pdev->dev,
			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 desc->flags, desc->opcode, desc->datalen, desc->retval,
			 desc->cookie_high, desc->cookie_low,
			 desc->params.internal.param0,
			 desc->params.internal.param1,
			 desc->params.internal.param2,
			 desc->params.internal.param3);
		print_hex_dump(KERN_INFO, "AQ buffer WB: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buff, buffer_len, true);
		kfree(buff);
		buff = NULL;
		kfree(desc);
		desc = NULL;
	} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
		dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
			 i40e_get_current_fd_count(pf));
	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
		/* Get main VSI */
		struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);

		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
			int ret;

			ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Stop LLDP AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
						pf->hw.mac.addr, ETH_P_LLDP, 0,
						main_vsi->seid, 0, true, NULL,
						NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
					 __func__, pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
#ifdef CONFIG_I40E_DCB
			pf->dcbx_cap = DCB_CAP_DCBX_HOST |
				       DCB_CAP_DCBX_VER_IEEE;
#endif /* CONFIG_I40E_DCB */
		} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
			int ret;

			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
						pf->hw.mac.addr, ETH_P_LLDP, 0,
						main_vsi->seid, 0, false, NULL,
						NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
					 __func__, pf->hw.aq.asq_last_status);
				/* Continue and start FW LLDP anyways */
			}

			ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Start LLDP AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
#ifdef CONFIG_I40E_DCB
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;
#endif /* CONFIG_I40E_DCB */
		} else if (strncmp(&cmd_buf[5], "get local", 9) == 0) {
			u16 llen, rlen;
			int ret;
			u8 *buff;

			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
						   I40E_AQ_LLDP_MIB_LOCAL,
						   buff, I40E_LLDPDU_SIZE,
						   &llen, &rlen, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Get LLDP MIB (local) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
			buff = NULL;
		} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
			u16 llen, rlen;
			int ret;
			u8 *buff;

			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_get_lldp_mib(&pf->hw,
					I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
					I40E_AQ_LLDP_MIB_REMOTE,
					buff, I40E_LLDPDU_SIZE,
					&llen, &rlen, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
			buff = NULL;
		} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
			int ret;

			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								true, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
		} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
			int ret;

			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
		}
	} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
		u16 buffer_len, bytes;
		u16 module;
		u32 offset;
		u16 *buff;
		int ret;

		cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
			     &module, &offset, &buffer_len);
		if (cnt == 0) {
			module = 0;
			offset = 0;
			buffer_len = 0;
		} else if (cnt == 1) {
			offset = 0;
			buffer_len = 0;
		} else if (cnt == 2) {
			buffer_len = 0;
		} else if (cnt > 3) {
			dev_info(&pf->pdev->dev,
				 "nvm read: bad command string, cnt=%d\n", cnt);
			goto command_write_done;
		}

		/* set the max length */
		buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);

		bytes = 2 * buffer_len;

		/* read at least 1k bytes, no more than 4kB */
		bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
		buff = kzalloc(bytes, GFP_KERNEL);
		if (!buff)
			goto command_write_done;

		ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
				 ret, pf->hw.aq.asq_last_status);
			kfree(buff);
			goto command_write_done;
		}

		ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
				       bytes, (u8 *)buff, true, NULL);
		i40e_release_nvm(&pf->hw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Read NVM AQ failed err=%d status=0x%x\n",
				 ret, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "Read NVM module=0x%x offset=0x%x words=%d\n",
				 module, offset, buffer_len);
			if (bytes)
				print_hex_dump(KERN_INFO, "NVM Dump: ",
					       DUMP_PREFIX_OFFSET, 16, 2,
					       buff, bytes, true);
		}
		kfree(buff);
		buff = NULL;
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
		dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
		dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
		dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
		dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " dump switch\n");
		dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
		dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc aq\n");
		dev_info(&pf->pdev->dev, " dump reset stats\n");
		dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
		dev_info(&pf->pdev->dev, " read <reg>\n");
		dev_info(&pf->pdev->dev, " write <reg> <value>\n");
		dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
		dev_info(&pf->pdev->dev, " clear_stats port\n");
		dev_info(&pf->pdev->dev, " pfr\n");
		dev_info(&pf->pdev->dev, " corer\n");
		dev_info(&pf->pdev->dev, " globr\n");
		dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
		dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
		dev_info(&pf->pdev->dev, " fd current cnt");
		dev_info(&pf->pdev->dev, " lldp start\n");
		dev_info(&pf->pdev->dev, " lldp stop\n");
		dev_info(&pf->pdev->dev, " lldp get local\n");
		dev_info(&pf->pdev->dev, " lldp get remote\n");
		dev_info(&pf->pdev->dev, " lldp event on\n");
		dev_info(&pf->pdev->dev, " lldp event off\n");
		dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
	}

command_write_done:
	kfree(cmd_buf);
	cmd_buf = NULL;
	return count;
}

static const struct file_operations i40e_dbg_command_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_command_read,
	.write = i40e_dbg_command_write,
};

/**************************************************************
 * netdev_ops
 * The netdev_ops entry in debugfs is for giving the driver commands
 * to be executed from the netdev operations.
 **************************************************************/
static char i40e_dbg_netdev_ops_buf[256] = "";

/**
 * i40e_dbg_netdev_ops_read - read for netdev_ops datum
 * @filp: the opened file
 * @buffer: where to write the data for the user to read
 * @count: the size of the user's buffer
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
					size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	struct i40e_vsi *main_vsi;
	int bytes_not_copied;
	int buf_size = 256;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;
	if (count < buf_size)
		return -ENOSPC;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	main_vsi = i40e_pf_get_main_vsi(pf);
	len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
		       i40e_dbg_netdev_ops_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);

	if (bytes_not_copied)
		return -EFAULT;

	*ppos = len;
	return len;
}

/**
 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
					 const char __user *buffer,
					 size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	struct i40e_vsi *vsi;
	char *buf_tmp;
	int vsi_seid;
	int i, cnt;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(i40e_dbg_netdev_ops_buf))
		return -ENOSPC;

	memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
	bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
					  buffer, count);
	if (bytes_not_copied)
		return -EFAULT;
	i40e_dbg_netdev_ops_buf[count] = '\0';

	buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
	if (buf_tmp) {
		*buf_tmp = '\0';
		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
	}

	if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
		int mtu;

		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
			     &vsi_seid, &mtu);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "change_mtu: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
								mtu);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "change_mtu called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "set_rx_mode: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "set_rx_mode called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
				 vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
				 vsi_seid);
		} else {
			for (i = 0; i < vsi->num_q_vectors; i++)
				napi_schedule(&vsi->q_vectors[i]->napi);
			dev_info(&pf->pdev->dev, "napi called\n");
		}
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
			 i40e_dbg_netdev_ops_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
		dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
	}
netdev_ops_write_done:
	return count;
}

static const struct file_operations i40e_dbg_netdev_ops_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_netdev_ops_read,
	.write = i40e_dbg_netdev_ops_write,
};

/**
 * i40e_dbg_pf_init - setup the debugfs directory for the PF
 * @pf: the PF that is starting up
 **/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
	const char *name = pci_name(pf->pdev);

	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);

	debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_command_fops);

	debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_netdev_ops_fops);
}

/**
 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
 * @pf: the PF that is stopping
 **/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
	debugfs_remove_recursive(pf->i40e_dbg_pf);
	pf->i40e_dbg_pf = NULL;
}

/**
 * i40e_dbg_init - start up debugfs for the driver
 **/
void i40e_dbg_init(void)
{
	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
	if (IS_ERR(i40e_dbg_root))
		pr_info("init of debugfs failed\n");
}

/**
 * i40e_dbg_exit - clean out the driver's debugfs entries
 **/
void i40e_dbg_exit(void)
{
	debugfs_remove_recursive(i40e_dbg_root);
	i40e_dbg_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */