// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifdef CONFIG_DEBUG_FS

#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include "i40e.h"
#include "i40e_virtchnl_pf.h"

static struct dentry *i40e_dbg_root;

enum ring_type {
	RING_TYPE_RX,
	RING_TYPE_TX,
	RING_TYPE_XDP
};

/**
 * i40e_dbg_find_vsi - searches for the vsi with the given seid
 * @pf: the PF structure to search for the vsi
 * @seid: seid of the vsi it is searching for
 **/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
	if (seid < 0) {
		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);

		return NULL;
	}

	return i40e_pf_get_vsi_by_seid(pf, seid);
}

/**************************************************************
 * command
 * The command entry in debugfs is for giving the driver commands
 * to be executed - these may be for changing the internal switch
 * setup, adding or removing filters, or other things. Many of
 * these will be useful for some forms of unit testing.
 **************************************************************/
static char i40e_dbg_command_buf[256] = "";
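
/*
 * Example usage from the shell (a sketch, assuming debugfs is mounted at
 * /sys/kernel/debug; the PCI address and the VSI seid below are hypothetical
 * placeholders for the per-PF directory created by i40e_dbg_pf_init()):
 *
 *   echo "dump switch"  > /sys/kernel/debug/i40e/0000:01:00.0/command
 *   echo "dump vsi 400" > /sys/kernel/debug/i40e/0000:01:00.0/command
 *
 * The command output is emitted through dev_info(), so it appears in the
 * kernel log rather than in the debugfs file; reading the file returns only
 * the LAN netdev name and the contents of i40e_dbg_command_buf above.
 */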

/**
 * i40e_dbg_command_read - read for command datum
 * @filp: the opened file
 * @buffer: where to write the data for the user to read
 * @count: the size of the user's buffer
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
				     size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	int buf_size = 256;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;
	if (count < buf_size)
		return -ENOSPC;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	len = snprintf(buf, buf_size, "%s: %s\n",
		       pf->vsi[pf->lan_vsi]->netdev->name,
		       i40e_dbg_command_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);

	if (bytes_not_copied)
		return -EFAULT;

	*ppos = len;
	return len;
}

static char *i40e_filter_state_string[] = {
	"INVALID",
	"NEW",
	"ACTIVE",
	"FAILED",
	"REMOVE",
};

/**
 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
 * @pf: the i40e_pf created in command write
 * @seid: the seid the user put in
 **/
static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
{
	struct rtnl_link_stats64 *nstat;
	struct i40e_mac_filter *f;
	struct i40e_vsi *vsi;
	int i, bkt;

	vsi = i40e_dbg_find_vsi(pf, seid);
	if (!vsi) {
		dev_info(&pf->pdev->dev,
			 "dump %d: seid not found\n", seid);
		return;
	}
	dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
	if (vsi->netdev) {
		struct net_device *nd = vsi->netdev;

		dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
			 nd->name, nd->state, nd->flags);
		dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
			 (unsigned long int)nd->features);
		dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
			 (unsigned long int)nd->hw_features);
		dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
			 (unsigned long int)nd->vlan_features);
	}
	dev_info(&pf->pdev->dev,
		 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
		 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
	for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
		dev_info(&pf->pdev->dev,
			 " state[%d] = %08lx\n",
			 i, vsi->state[i]);
	if (vsi == pf->vsi[pf->lan_vsi])
		dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
			 pf->hw.mac.addr,
			 pf->hw.mac.port_addr);
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		dev_info(&pf->pdev->dev,
			 " mac_filter_hash: %pM vid=%d, state %s\n",
			 f->macaddr, f->vlan,
			 i40e_filter_state_string[f->state]);
	}
	dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
		 vsi->active_filters, vsi->promisc_threshold,
		 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
		  "ON" : "OFF"));
	nstat = i40e_get_vsi_stats_struct(vsi);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
		 (unsigned long int)nstat->rx_packets,
		 (unsigned long int)nstat->rx_bytes,
		 (unsigned long int)nstat->rx_errors,
		 (unsigned long int)nstat->rx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
		 (unsigned long int)nstat->tx_packets,
		 (unsigned long int)nstat->tx_bytes,
		 (unsigned long int)nstat->tx_errors,
		 (unsigned long int)nstat->tx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats: multicast = %lu, collisions = %lu\n",
		 (unsigned long int)nstat->multicast,
		 (unsigned long int)nstat->collisions);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
		 (unsigned long int)nstat->rx_length_errors,
		 (unsigned long int)nstat->rx_over_errors,
		 (unsigned long int)nstat->rx_crc_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
		 (unsigned long int)nstat->rx_frame_errors,
		 (unsigned long int)nstat->rx_fifo_errors,
		 (unsigned long int)nstat->rx_missed_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
		 (unsigned long int)nstat->tx_aborted_errors,
		 (unsigned long int)nstat->tx_carrier_errors,
		 (unsigned long int)nstat->tx_fifo_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
		 (unsigned long int)nstat->tx_heartbeat_errors,
		 (unsigned long int)nstat->tx_window_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
		 (unsigned long int)nstat->rx_compressed,
		 (unsigned long int)nstat->tx_compressed);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_packets,
		 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
		 (unsigned long int)vsi->net_stats_offsets.rx_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.tx_packets,
		 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
		 (unsigned long int)vsi->net_stats_offsets.tx_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.multicast,
		 (unsigned long int)vsi->net_stats_offsets.collisions);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
		 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
		 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
	dev_info(&pf->pdev->dev,
		 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
		 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
		 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
	dev_info(&pf->pdev->dev,
		 " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
		 vsi->tx_restart, vsi->tx_busy,
		 vsi->rx_buf_failed, vsi->rx_page_failed);
	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);

		if (!rx_ring)
			continue;

		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
			 i, *rx_ring->state,
			 rx_ring->queue_index,
			 rx_ring->reg_idx);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_buf_len = %d\n",
			 i, rx_ring->rx_buf_len);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
			 i,
			 rx_ring->next_to_use,
			 rx_ring->next_to_clean,
			 rx_ring->ring_active);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
			 i, rx_ring->stats.packets,
			 rx_ring->stats.bytes,
			 rx_ring->rx_stats.non_eop_descs);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
			 i,
			 rx_ring->rx_stats.alloc_page_failed,
			 rx_ring->rx_stats.alloc_buff_failed);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
			 i,
			 rx_ring->rx_stats.page_reuse_count);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: size = %i\n",
			 i, rx_ring->size);
		dev_info(&pf->pdev->dev,
			 " rx_rings[%i]: itr_setting = %d (%s)\n",
			 i, rx_ring->itr_setting,
			 ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
	}
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);

		if (!tx_ring)
			continue;

		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
			 i, *tx_ring->state,
			 tx_ring->queue_index,
			 tx_ring->reg_idx);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
			 i,
			 tx_ring->next_to_use,
			 tx_ring->next_to_clean,
			 tx_ring->ring_active);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
			 i, tx_ring->stats.packets,
			 tx_ring->stats.bytes,
			 tx_ring->tx_stats.restart_queue);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
			 i,
			 tx_ring->tx_stats.tx_busy,
			 tx_ring->tx_stats.tx_done_old,
			 tx_ring->tx_stats.tx_stopped);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: size = %i\n",
			 i, tx_ring->size);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: DCB tc = %d\n",
			 i, tx_ring->dcb_tc);
		dev_info(&pf->pdev->dev,
			 " tx_rings[%i]: itr_setting = %d (%s)\n",
			 i, tx_ring->itr_setting,
			 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
	}
	if (i40e_enabled_xdp_vsi(vsi)) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);

			if (!xdp_ring)
				continue;

			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
				 i, *xdp_ring->state,
				 xdp_ring->queue_index,
				 xdp_ring->reg_idx);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
				 i,
				 xdp_ring->next_to_use,
				 xdp_ring->next_to_clean,
				 xdp_ring->ring_active);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
				 i, xdp_ring->stats.packets,
				 xdp_ring->stats.bytes,
				 xdp_ring->tx_stats.restart_queue);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
				 i,
				 xdp_ring->tx_stats.tx_busy,
				 xdp_ring->tx_stats.tx_done_old);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: size = %i\n",
				 i, xdp_ring->size);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: DCB tc = %d\n",
				 i, xdp_ring->dcb_tc);
			dev_info(&pf->pdev->dev,
				 " xdp_rings[%i]: itr_setting = %d (%s)\n",
				 i, xdp_ring->itr_setting,
				 ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
				 "dynamic" : "fixed");
		}
	}
	rcu_read_unlock();
	dev_info(&pf->pdev->dev,
		 " work_limit = %d\n",
		 vsi->work_limit);
	dev_info(&pf->pdev->dev,
		 " max_frame = %d, rx_buf_len = %d dtype = %d\n",
		 vsi->max_frame, vsi->rx_buf_len, 0);
	dev_info(&pf->pdev->dev,
		 " num_q_vectors = %i, base_vector = %i\n",
		 vsi->num_q_vectors, vsi->base_vector);
	dev_info(&pf->pdev->dev,
		 " seid = %d, id = %d, uplink_seid = %d\n",
		 vsi->seid, vsi->id, vsi->uplink_seid);
	dev_info(&pf->pdev->dev,
		 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
		 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
		 vsi->num_rx_desc);
	dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
	if (vsi->type == I40E_VSI_SRIOV)
		dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
	dev_info(&pf->pdev->dev,
		 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
		 vsi->info.valid_sections, vsi->info.switch_id);
	dev_info(&pf->pdev->dev,
		 " info: sw_reserved[] = 0x%02x 0x%02x\n",
		 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
	dev_info(&pf->pdev->dev,
		 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
		 vsi->info.sec_flags, vsi->info.sec_reserved);
	dev_info(&pf->pdev->dev,
		 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
		 vsi->info.pvid, vsi->info.fcoe_pvid,
		 vsi->info.port_vlan_flags);
	dev_info(&pf->pdev->dev,
		 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
		 vsi->info.pvlan_reserved[2]);
	dev_info(&pf->pdev->dev,
		 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
		 vsi->info.ingress_table, vsi->info.egress_table);
	dev_info(&pf->pdev->dev,
		 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
		 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
		 vsi->info.cas_pv_reserved);
	dev_info(&pf->pdev->dev,
		 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
		 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
		 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
		 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
	dev_info(&pf->pdev->dev,
		 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
		 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
		 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
		 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
	dev_info(&pf->pdev->dev,
		 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
		 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
		 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
		 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
	dev_info(&pf->pdev->dev,
		 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.queueing_opt_flags,
		 vsi->info.queueing_opt_reserved[0],
		 vsi->info.queueing_opt_reserved[1],
		 vsi->info.queueing_opt_reserved[2]);
	dev_info(&pf->pdev->dev,
		 " info: up_enable_bits = 0x%02x\n",
		 vsi->info.up_enable_bits);
	dev_info(&pf->pdev->dev,
		 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
		 vsi->info.sched_reserved, vsi->info.outer_up_table);
	dev_info(&pf->pdev->dev,
		 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
		 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
		 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
		 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
		 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
	dev_info(&pf->pdev->dev,
		 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
		 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
		 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
		 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
		 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
	dev_info(&pf->pdev->dev,
		 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
		 vsi->info.stat_counter_idx, vsi->info.sched_id);
	dev_info(&pf->pdev->dev,
		 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
		 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
		 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
		 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
		 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
		 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
		 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
	dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
	dev_info(&pf->pdev->dev,
		 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
		 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		dev_info(&pf->pdev->dev,
			 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
			 i, vsi->tc_config.tc_info[i].qoffset,
			 vsi->tc_config.tc_info[i].qcount,
			 vsi->tc_config.tc_info[i].netdev_tc);
	}
	dev_info(&pf->pdev->dev,
		 " bw: bw_limit = %d, bw_max_quanta = %d\n",
		 vsi->bw_limit, vsi->bw_max_quanta);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		dev_info(&pf->pdev->dev,
			 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
			 i, vsi->bw_ets_share_credits[i],
			 vsi->bw_ets_limit_credits[i],
			 vsi->bw_ets_max_quanta[i]);
	}
}

/**
 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
{
	struct i40e_adminq_ring *ring;
	struct i40e_hw *hw = &pf->hw;
	char hdr[32];
	int i;

	snprintf(hdr, sizeof(hdr), "%s %s: ",
		 dev_driver_string(&pf->pdev->dev),
		 dev_name(&pf->pdev->dev));

	/* first the send (command) ring, then the receive (event) ring */
	dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
	ring = &(hw->aq.asq);
	for (i = 0; i < ring->count; i++) {
		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);

		dev_info(&pf->pdev->dev,
			 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
			 i, d->flags, d->opcode, d->datalen, d->retval,
			 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
	}

	dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
	ring = &(hw->aq.arq);
	for (i = 0; i < ring->count; i++) {
		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);

		dev_info(&pf->pdev->dev,
			 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
			 i, d->flags, d->opcode, d->datalen, d->retval,
			 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
	}
}

/**
 * i40e_dbg_dump_desc - handles dump desc write into command datum
 * @cnt: number of arguments that the user supplied
 * @vsi_seid: vsi id entered by user
 * @ring_id: ring id entered by user
 * @desc_n: descriptor number entered by user
 * @pf: the i40e_pf created in command write
 * @type: enum describing whether ring is RX, TX or XDP
 **/
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
			       struct i40e_pf *pf, enum ring_type type)
{
	bool is_rx_ring = type == RING_TYPE_RX;
	struct i40e_tx_desc *txd;
	union i40e_rx_desc *rxd;
	struct i40e_ring *ring;
	struct i40e_vsi *vsi;
	int i;

	vsi = i40e_dbg_find_vsi(pf, vsi_seid);
	if (!vsi) {
		dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
		return;
	}
	if (vsi->type != I40E_VSI_MAIN &&
	    vsi->type != I40E_VSI_FDIR &&
	    vsi->type != I40E_VSI_VMDQ2) {
		dev_info(&pf->pdev->dev,
			 "vsi %d type %d descriptor rings not available\n",
			 vsi_seid, vsi->type);
		return;
	}
	if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
		dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
		return;
	}
	if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
		dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
		return;
	}
	if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
		dev_info(&pf->pdev->dev,
			 "descriptor rings have not been allocated for vsi %d\n",
			 vsi_seid);
		return;
	}

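	/*
	 * Work on a private copy of the selected ring: the ring structure is
	 * duplicated with kmemdup() below, and the descriptor dump then uses
	 * that snapshot instead of the live ring pointer held by the VSI.
	 */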
"dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n"); 626 } 627 628 out: 629 kfree(ring); 630 } 631 632 /** 633 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum 634 * @pf: the i40e_pf created in command write 635 **/ 636 static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) 637 { 638 struct i40e_vsi *vsi; 639 int i; 640 641 i40e_pf_for_each_vsi(pf, i, vsi) 642 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid); 643 } 644 645 /** 646 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum 647 * @pf: the i40e_pf created in command write 648 * @estats: the eth stats structure to be dumped 649 **/ 650 static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, 651 struct i40e_eth_stats *estats) 652 { 653 dev_info(&pf->pdev->dev, " ethstats:\n"); 654 dev_info(&pf->pdev->dev, 655 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n", 656 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast); 657 dev_info(&pf->pdev->dev, 658 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n", 659 estats->rx_broadcast, estats->rx_discards); 660 dev_info(&pf->pdev->dev, 661 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n", 662 estats->rx_unknown_protocol, estats->tx_bytes); 663 dev_info(&pf->pdev->dev, 664 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n", 665 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast); 666 dev_info(&pf->pdev->dev, 667 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n", 668 estats->tx_discards, estats->tx_errors); 669 } 670 671 /** 672 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb 673 * @pf: the i40e_pf created in command write 674 * @seid: the seid the user put in 675 **/ 676 static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) 677 { 678 struct i40e_veb *veb; 679 680 veb = i40e_pf_get_veb_by_seid(pf, seid); 681 if (!veb) { 682 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); 683 return; 684 } 685 dev_info(&pf->pdev->dev, 686 "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n", 687 veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid, 688 veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); 689 i40e_dbg_dump_eth_stats(pf, &veb->stats); 690 } 691 692 /** 693 * i40e_dbg_dump_veb_all - dumps all known veb's stats 694 * @pf: the i40e_pf created in command write 695 **/ 696 static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) 697 { 698 struct i40e_veb *veb; 699 int i; 700 701 i40e_pf_for_each_veb(pf, i, veb) 702 i40e_dbg_dump_veb_seid(pf, veb->seid); 703 } 704 705 /** 706 * i40e_dbg_dump_vf - dump VF info 707 * @pf: the i40e_pf created in command write 708 * @vf_id: the vf_id from the user 709 **/ 710 static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) 711 { 712 struct i40e_vf *vf; 713 struct i40e_vsi *vsi; 714 715 if (!pf->num_alloc_vfs) { 716 dev_info(&pf->pdev->dev, "no VFs allocated\n"); 717 } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) { 718 vf = &pf->vf[vf_id]; 719 vsi = pf->vsi[vf->lan_vsi_idx]; 720 dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", 721 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); 722 dev_info(&pf->pdev->dev, " num MDD=%lld\n", 723 vf->num_mdd_events); 724 } else { 725 dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); 726 } 727 } 728 729 /** 730 * i40e_dbg_dump_vf_all - dump VF info for all VFs 731 * @pf: the i40e_pf created in command write 732 **/ 733 static void i40e_dbg_dump_vf_all(struct i40e_pf *pf) 734 { 735 int i; 736 737 if (!pf->num_alloc_vfs) 738 dev_info(&pf->pdev->dev, "no VFs enabled!\n"); 739 else 740 for (i = 0; i < pf->num_alloc_vfs; i++) 741 i40e_dbg_dump_vf(pf, i); 742 } 743 744 /** 745 * i40e_dbg_command_write - write into command datum 746 * @filp: the opened file 747 * @buffer: where to find the user's data 748 * @count: the length of the user's data 749 * @ppos: file position offset 750 **/ 751 static ssize_t i40e_dbg_command_write(struct file *filp, 752 const char __user *buffer, 753 size_t count, loff_t *ppos) 754 { 755 struct i40e_pf *pf = filp->private_data; 756 char *cmd_buf, *cmd_buf_tmp; 757 int bytes_not_copied; 758 struct i40e_vsi *vsi; 759 int vsi_seid; 760 int veb_seid; 761 int vf_id; 762 int cnt; 763 764 /* don't allow partial writes */ 765 if (*ppos != 0) 766 return 0; 767 768 cmd_buf = kzalloc(count + 1, GFP_KERNEL); 769 if (!cmd_buf) 770 return count; 771 bytes_not_copied = copy_from_user(cmd_buf, buffer, count); 772 if (bytes_not_copied) { 773 kfree(cmd_buf); 774 return -EFAULT; 775 } 776 cmd_buf[count] = '\0'; 777 778 cmd_buf_tmp = strchr(cmd_buf, '\n'); 779 if (cmd_buf_tmp) { 780 *cmd_buf_tmp = '\0'; 781 count = cmd_buf_tmp - cmd_buf + 1; 782 } 783 784 if (strncmp(cmd_buf, "add vsi", 7) == 0) { 785 vsi_seid = -1; 786 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); 787 if (cnt == 0) { 788 /* default to PF VSI */ 789 vsi_seid = pf->vsi[pf->lan_vsi]->seid; 790 } else if (vsi_seid < 0) { 791 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n", 792 vsi_seid); 793 goto command_write_done; 794 } 795 796 /* By default we are in VEPA mode, if this is the first VF/VMDq 797 * VSI to be added switch to VEB mode. 
		/* By default we are in VEPA mode, if this is the first VF/VMDq
		 * VSI to be added switch to VEB mode.
		 */
		if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
			set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}

		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
		if (vsi)
			dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
				 vsi->seid, vsi->uplink_seid);
		else
			dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);

	} else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev,
				 "del vsi: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
				 vsi_seid);
			goto command_write_done;
		}

		dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
		i40e_vsi_release(vsi);

	} else if (strncmp(cmd_buf, "add relay", 9) == 0) {
		struct i40e_veb *veb;
		u8 enabled_tc = 0x1;
		int uplink_seid;

		cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
		if (cnt == 0) {
			uplink_seid = 0;
			vsi_seid = 0;
		} else if (cnt != 2) {
			dev_info(&pf->pdev->dev,
				 "add relay: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		} else if (uplink_seid < 0) {
			dev_info(&pf->pdev->dev,
				 "add relay %d: bad uplink seid\n",
				 uplink_seid);
			goto command_write_done;
		}

		if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
			dev_info(&pf->pdev->dev,
				 "add relay: relay uplink %d not found\n",
				 uplink_seid);
			goto command_write_done;
		} else if (uplink_seid) {
			vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
			if (!vsi) {
				dev_info(&pf->pdev->dev,
					 "add relay: VSI %d not found\n",
					 vsi_seid);
				goto command_write_done;
			}
			enabled_tc = vsi->tc_config.enabled_tc;
		} else if (vsi_seid) {
			dev_info(&pf->pdev->dev,
				 "add relay: VSI must be 0 for floating relay\n");
			goto command_write_done;
		}

		veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
		if (veb)
			dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
		else
			dev_info(&pf->pdev->dev, "add relay failed\n");

	} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
		struct i40e_veb *veb;
		int i;

		cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev,
				 "del relay: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		} else if (veb_seid < 0) {
			dev_info(&pf->pdev->dev,
				 "del relay %d: bad relay seid\n", veb_seid);
			goto command_write_done;
		}

		/* find the veb */
		i40e_pf_for_each_veb(pf, i, veb)
			if (veb->seid == veb_seid)
				break;

		if (i >= I40E_MAX_VEB) {
			dev_info(&pf->pdev->dev,
				 "del relay: relay %d not found\n", veb_seid);
			goto command_write_done;
		}

		dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
		i40e_veb_release(veb);
	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
		unsigned int v;
		int ret;
		u16 vid;

		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev,
				 "add pvid: bad command string, cnt=%d\n", cnt);
			goto command_write_done;
		}

		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
				 vsi_seid);
			goto command_write_done;
		}

		vid = v;
		ret = i40e_vsi_add_pvid(vsi, vid);
		if (!ret)
			dev_info(&pf->pdev->dev,
				 "add pvid: %d added to VSI %d\n",
				 vid, vsi_seid);
		else
			dev_info(&pf->pdev->dev,
				 "add pvid: %d to VSI %d failed, ret=%d\n",
				 vid, vsi_seid, ret);

	} else if (strncmp(cmd_buf, "del pvid", 8) == 0) {

		cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev,
				 "del pvid: bad command string, cnt=%d\n",
				 cnt);
			goto command_write_done;
		}

		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "del pvid: VSI %d not found\n", vsi_seid);
			goto command_write_done;
		}

		i40e_vsi_remove_pvid(vsi);
		dev_info(&pf->pdev->dev,
			 "del pvid: removed from VSI %d\n", vsi_seid);

	} else if (strncmp(cmd_buf, "dump", 4) == 0) {
		if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
			i40e_fetch_switch_configuration(pf, true);
		} else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
			cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
			if (cnt > 0)
				i40e_dbg_dump_vsi_seid(pf, vsi_seid);
			else
				i40e_dbg_dump_vsi_no_seid(pf);
		} else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
			cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
			if (cnt > 0)
				i40e_dbg_dump_veb_seid(pf, vsi_seid);
			else
				i40e_dbg_dump_veb_all(pf);
		} else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
			cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
			if (cnt > 0)
				i40e_dbg_dump_vf(pf, vf_id);
			else
				i40e_dbg_dump_vf_all(pf);
		} else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
			int ring_id, desc_n;
			if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
				cnt = sscanf(&cmd_buf[12], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, RING_TYPE_RX);
			} else if (strncmp(&cmd_buf[10], "tx", 2)
					== 0) {
				cnt = sscanf(&cmd_buf[12], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, RING_TYPE_TX);
			} else if (strncmp(&cmd_buf[10], "xdp", 3)
					== 0) {
				cnt = sscanf(&cmd_buf[13], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, RING_TYPE_XDP);
			} else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
				i40e_dbg_dump_aq_desc(pf);
			} else {
				dev_info(&pf->pdev->dev,
					 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
				dev_info(&pf->pdev->dev,
					 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
				dev_info(&pf->pdev->dev,
					 "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
				dev_info(&pf->pdev->dev, "dump desc aq\n");
			}
		} else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
			dev_info(&pf->pdev->dev,
				 "core reset count: %d\n", pf->corer_count);
			dev_info(&pf->pdev->dev,
				 "global reset count: %d\n", pf->globr_count);
			dev_info(&pf->pdev->dev,
				 "emp reset count: %d\n", pf->empr_count);
			dev_info(&pf->pdev->dev,
				 "pf reset count: %d\n", pf->pfr_count);
		} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
			struct i40e_aqc_query_port_ets_config_resp *bw_data;
			struct i40e_dcbx_config *cfg =
						&pf->hw.local_dcbx_config;
			struct i40e_dcbx_config *r_cfg =
						&pf->hw.remote_dcbx_config;
			int i, ret;
			u16 switch_id;

			bw_data = kzalloc(sizeof(
				struct i40e_aqc_query_port_ets_config_resp),
					  GFP_KERNEL);
			if (!bw_data) {
				ret = -ENOMEM;
				goto command_write_done;
			}

			vsi = pf->vsi[pf->lan_vsi];
			switch_id =
				le16_to_cpu(vsi->info.switch_id) &
					    I40E_AQ_VSI_SW_ID_MASK;

			ret = i40e_aq_query_port_ets_config(&pf->hw,
							    switch_id,
							    bw_data, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Query Port ETS Config AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(bw_data);
				bw_data = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev,
				 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
				 bw_data->tc_valid_bits,
				 bw_data->tc_strict_priority_bits,
				 le16_to_cpu(bw_data->tc_bw_max[0]),
				 le16_to_cpu(bw_data->tc_bw_max[1]));
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
					 bw_data->tc_bw_share_credits[i],
					 le16_to_cpu(bw_data->tc_bw_limits[i]));
			}

			kfree(bw_data);
			bw_data = NULL;

			dev_info(&pf->pdev->dev,
				 "port dcbx_mode=%d\n", cfg->dcbx_mode);
			dev_info(&pf->pdev->dev,
				 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
				 cfg->etscfg.willing, cfg->etscfg.cbs,
				 cfg->etscfg.maxtcs);
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, cfg->etscfg.prioritytable[i],
					 cfg->etscfg.tcbwtable[i],
					 cfg->etscfg.tsatable[i]);
			}
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, cfg->etsrec.prioritytable[i],
					 cfg->etsrec.tcbwtable[i],
					 cfg->etsrec.tsatable[i]);
			}
			dev_info(&pf->pdev->dev,
				 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
				 cfg->pfc.willing, cfg->pfc.mbc,
				 cfg->pfc.pfccap, cfg->pfc.pfcenable);
			dev_info(&pf->pdev->dev,
				 "port app_table: num_apps=%d\n", cfg->numapps);
			for (i = 0; i < cfg->numapps; i++) {
				dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
					 i, cfg->app[i].priority,
					 cfg->app[i].selector,
					 cfg->app[i].protocolid);
			}
			/* Peer TLV DCBX data */
			dev_info(&pf->pdev->dev,
				 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
				 r_cfg->etscfg.willing,
				 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, r_cfg->etscfg.prioritytable[i],
					 r_cfg->etscfg.tcbwtable[i],
					 r_cfg->etscfg.tsatable[i]);
			}
			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
				dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
					 i, r_cfg->etsrec.prioritytable[i],
					 r_cfg->etsrec.tcbwtable[i],
					 r_cfg->etsrec.tsatable[i]);
			}
			dev_info(&pf->pdev->dev,
				 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
				 r_cfg->pfc.willing,
				 r_cfg->pfc.mbc,
				 r_cfg->pfc.pfccap,
				 r_cfg->pfc.pfcenable);
			dev_info(&pf->pdev->dev,
				 "remote port app_table: num_apps=%d\n",
				 r_cfg->numapps);
			for (i = 0; i < r_cfg->numapps; i++) {
				dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
					 i, r_cfg->app[i].priority,
					 r_cfg->app[i].selector,
					 r_cfg->app[i].protocolid);
			}
		} else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
			int cluster_id, table_id;
			int index, ret;
			u16 buff_len = 4096;
			u32 next_index;
			u8 next_table;
			u8 *buff;
			u16 rlen;

			cnt = sscanf(&cmd_buf[18], "%i %i %i",
				     &cluster_id, &table_id, &index);
			if (cnt != 3) {
				dev_info(&pf->pdev->dev,
					 "dump debug fwdata <cluster_id> <table_id> <index>\n");
				goto command_write_done;
			}

			dev_info(&pf->pdev->dev,
				 "AQ debug dump fwdata params %x %x %x %x\n",
				 cluster_id, table_id, index, buff_len);
			buff = kzalloc(buff_len, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
						 index, buff_len, buff, &rlen,
						 &next_table, &next_index,
						 NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "debug dump fwdata AQ Failed %d 0x%x\n",
					 ret, pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev,
				 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
				 rlen, next_table, next_index);
			print_hex_dump(KERN_INFO, "AQ buffer WB: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, rlen, true);
			kfree(buff);
			buff = NULL;
		} else {
			dev_info(&pf->pdev->dev,
				 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
			dev_info(&pf->pdev->dev, "dump switch\n");
			dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
			dev_info(&pf->pdev->dev, "dump reset stats\n");
			dev_info(&pf->pdev->dev, "dump port\n");
			dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
			dev_info(&pf->pdev->dev,
				 "dump debug fwdata <cluster_id> <table_id> <index>\n");
		}
	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
		dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));

	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
		dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
		i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));

	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
		dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
		i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));

	} else if (strncmp(cmd_buf, "read", 4) == 0) {
		u32 address;
		u32 value;

		cnt = sscanf(&cmd_buf[4], "%i", &address);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "read <reg>\n");
			goto command_write_done;
		}

		/* check the range on address */
		if (address > (pf->ioremap_len - sizeof(u32))) {
			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
			goto command_write_done;
		}

		value = rd32(&pf->hw, address);
		dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
			 address, value);

	} else if (strncmp(cmd_buf, "write", 5) == 0) {
		u32 address, value;

		cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev, "write <reg> <value>\n");
			goto command_write_done;
		}

		/* check the range on address */
		if (address > (pf->ioremap_len - sizeof(u32))) {
			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
			goto command_write_done;
		}
		wr32(&pf->hw, address, value);
		value = rd32(&pf->hw, address);
		dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
			 address, value);
	} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
		if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
			cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
			if (cnt == 0) {
				int i;

				i40e_pf_for_each_vsi(pf, i, vsi)
					i40e_vsi_reset_stats(vsi);
				dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
			} else if (cnt == 1) {
				vsi = i40e_dbg_find_vsi(pf, vsi_seid);
				if (!vsi) {
					dev_info(&pf->pdev->dev,
						 "clear_stats vsi: bad vsi %d\n",
						 vsi_seid);
					goto command_write_done;
				}
				i40e_vsi_reset_stats(vsi);
				dev_info(&pf->pdev->dev,
					 "vsi clear stats called for vsi %d\n",
					 vsi_seid);
			} else {
				dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
			}
		} else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
			if (pf->hw.partition_id == 1) {
				i40e_pf_reset_stats(pf);
				dev_info(&pf->pdev->dev, "port stats cleared\n");
			} else {
				dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
			}
		} else {
			dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
		}
	} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
		struct i40e_aq_desc *desc;
		int ret;

		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
		if (!desc)
			goto command_write_done;
		cnt = sscanf(&cmd_buf[11],
			     "%hi %hi %hi %hi %i %i %i %i %i %i",
			     &desc->flags,
			     &desc->opcode, &desc->datalen, &desc->retval,
			     &desc->cookie_high, &desc->cookie_low,
			     &desc->params.internal.param0,
			     &desc->params.internal.param1,
			     &desc->params.internal.param2,
			     &desc->params.internal.param3);
		if (cnt != 10) {
			dev_info(&pf->pdev->dev,
				 "send aq_cmd: bad command string, cnt=%d\n",
				 cnt);
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
		if (!ret) {
			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
		} else if (ret == -EIO) {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x AQ Error: %d\n",
				 desc->opcode, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x Status: %d\n",
				 desc->opcode, ret);
		}
		dev_info(&pf->pdev->dev,
			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 desc->flags, desc->opcode, desc->datalen, desc->retval,
			 desc->cookie_high, desc->cookie_low,
			 desc->params.internal.param0,
			 desc->params.internal.param1,
			 desc->params.internal.param2,
			 desc->params.internal.param3);
		kfree(desc);
		desc = NULL;
	} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
		struct i40e_aq_desc *desc;
		u16 buffer_len;
		u8 *buff;
		int ret;

		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
		if (!desc)
			goto command_write_done;
		cnt = sscanf(&cmd_buf[20],
			     "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
			     &desc->flags,
			     &desc->opcode, &desc->datalen, &desc->retval,
			     &desc->cookie_high, &desc->cookie_low,
			     &desc->params.internal.param0,
			     &desc->params.internal.param1,
			     &desc->params.internal.param2,
			     &desc->params.internal.param3,
			     &buffer_len);
		if (cnt != 11) {
			dev_info(&pf->pdev->dev,
				 "send indirect aq_cmd: bad command string, cnt=%d\n",
				 cnt);
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		/* Just stub a buffer big enough in case user messed up */
		if (buffer_len == 0)
			buffer_len = 1280;

		buff = kzalloc(buffer_len, GFP_KERNEL);
		if (!buff) {
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		ret = i40e_asq_send_command(&pf->hw, desc, buff,
					    buffer_len, NULL);
		if (!ret) {
			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
		} else if (ret == -EIO) {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x AQ Error: %d\n",
				 desc->opcode, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "AQ command send failed Opcode %x Status: %d\n",
				 desc->opcode, ret);
		}
		dev_info(&pf->pdev->dev,
			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 desc->flags, desc->opcode, desc->datalen, desc->retval,
			 desc->cookie_high, desc->cookie_low,
			 desc->params.internal.param0,
			 desc->params.internal.param1,
			 desc->params.internal.param2,
			 desc->params.internal.param3);
		print_hex_dump(KERN_INFO, "AQ buffer WB: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buff, buffer_len, true);
		kfree(buff);
		buff = NULL;
		kfree(desc);
		desc = NULL;
	} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
		dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
			 i40e_get_current_fd_count(pf));
	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
			int ret;

			ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Stop LLDP AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
						pf->hw.mac.addr,
						ETH_P_LLDP, 0,
						pf->vsi[pf->lan_vsi]->seid,
						0, true, NULL, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
					 __func__, pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
#ifdef CONFIG_I40E_DCB
			pf->dcbx_cap = DCB_CAP_DCBX_HOST |
				       DCB_CAP_DCBX_VER_IEEE;
#endif /* CONFIG_I40E_DCB */
		} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
			int ret;

			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
						pf->hw.mac.addr,
						ETH_P_LLDP, 0,
						pf->vsi[pf->lan_vsi]->seid,
						0, false, NULL, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
					 __func__, pf->hw.aq.asq_last_status);
				/* Continue and start FW LLDP anyways */
			}

			ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Start LLDP AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
#ifdef CONFIG_I40E_DCB
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;
#endif /* CONFIG_I40E_DCB */
		} else if (strncmp(&cmd_buf[5],
			   "get local", 9) == 0) {
			u16 llen, rlen;
			int ret;
			u8 *buff;

			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
						   I40E_AQ_LLDP_MIB_LOCAL,
						   buff, I40E_LLDPDU_SIZE,
						   &llen, &rlen, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Get LLDP MIB (local) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
			buff = NULL;
		} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
			u16 llen, rlen;
			int ret;
			u8 *buff;

			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_get_lldp_mib(&pf->hw,
					I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
					I40E_AQ_LLDP_MIB_REMOTE,
					buff, I40E_LLDPDU_SIZE,
					&llen, &rlen, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
			buff = NULL;
		} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
			int ret;

			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								true, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
		} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
			int ret;

			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
		}
	} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
		u16 buffer_len, bytes;
		u16 module;
		u32 offset;
		u16 *buff;
		int ret;

		cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
			     &module, &offset, &buffer_len);
		if (cnt == 0) {
			module = 0;
			offset = 0;
			buffer_len = 0;
		} else if (cnt == 1) {
			offset = 0;
			buffer_len = 0;
		} else if (cnt == 2) {
			buffer_len = 0;
		} else if (cnt > 3) {
			dev_info(&pf->pdev->dev,
				 "nvm read: bad command string, cnt=%d\n", cnt);
			goto command_write_done;
		}

		/* set the max length */
		buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);

		bytes = 2 * buffer_len;

		/* read at least 1k bytes, no more than 4kB */
		bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
		buff = kzalloc(bytes, GFP_KERNEL);
		if (!buff)
			goto command_write_done;

		ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
				 ret, pf->hw.aq.asq_last_status);
			kfree(buff);
			goto command_write_done;
		}

		ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
				       bytes, (u8 *)buff, true, NULL);
		i40e_release_nvm(&pf->hw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Read NVM AQ failed err=%d status=0x%x\n",
				 ret, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "Read NVM module=0x%x offset=0x%x words=%d\n",
				 module, offset, buffer_len);
			if (bytes)
				print_hex_dump(KERN_INFO, "NVM Dump: ",
					       DUMP_PREFIX_OFFSET, 16, 2,
					       buff, bytes, true);
		}
		kfree(buff);
		buff = NULL;
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
		dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
		dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
		dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
		dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " dump switch\n");
		dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
		dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc aq\n");
		dev_info(&pf->pdev->dev, " dump reset stats\n");
		dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
		dev_info(&pf->pdev->dev, " read <reg>\n");
		dev_info(&pf->pdev->dev, " write <reg> <value>\n");
		dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
		dev_info(&pf->pdev->dev, " clear_stats port\n");
		dev_info(&pf->pdev->dev, " pfr\n");
		dev_info(&pf->pdev->dev, " corer\n");
		dev_info(&pf->pdev->dev, " globr\n");
		dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
		dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
		dev_info(&pf->pdev->dev, " fd current cnt");
		dev_info(&pf->pdev->dev, " lldp start\n");
		dev_info(&pf->pdev->dev, " lldp stop\n");
		dev_info(&pf->pdev->dev, " lldp get local\n");
		dev_info(&pf->pdev->dev, " lldp get remote\n");
		dev_info(&pf->pdev->dev, " lldp event on\n");
		dev_info(&pf->pdev->dev, " lldp event off\n");
		dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
	}

command_write_done:
	kfree(cmd_buf);
	cmd_buf = NULL;
	return count;
}

static const struct file_operations i40e_dbg_command_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_command_read,
	.write = i40e_dbg_command_write,
};

/**************************************************************
 * netdev_ops
 * The netdev_ops entry in debugfs is for giving the driver commands
 * to be executed from the netdev operations.
 **************************************************************/
static char i40e_dbg_netdev_ops_buf[256] = "";
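
/*
 * Example usage (a sketch, assuming debugfs is mounted at /sys/kernel/debug;
 * the PCI address and VSI seid are hypothetical placeholders for the per-PF
 * directory created by i40e_dbg_pf_init()):
 *
 *   echo "napi 400" > /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 *
 * As with the command file above, any output from these operations is
 * emitted via dev_info() to the kernel log.
 */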

/**
 * i40e_dbg_netdev_ops_read - read for netdev_ops datum
 * @filp: the opened file
 * @buffer: where to write the data for the user to read
 * @count: the size of the user's buffer
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
					size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	int buf_size = 256;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;
	if (count < buf_size)
		return -ENOSPC;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	len = snprintf(buf, buf_size, "%s: %s\n",
		       pf->vsi[pf->lan_vsi]->netdev->name,
		       i40e_dbg_netdev_ops_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);

	if (bytes_not_copied)
		return -EFAULT;

	*ppos = len;
	return len;
}

/**
 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
					 const char __user *buffer,
					 size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	struct i40e_vsi *vsi;
	char *buf_tmp;
	int vsi_seid;
	int i, cnt;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(i40e_dbg_netdev_ops_buf))
		return -ENOSPC;

	memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
	bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
					  buffer, count);
	if (bytes_not_copied)
		return -EFAULT;
	i40e_dbg_netdev_ops_buf[count] = '\0';

	buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
	if (buf_tmp) {
		*buf_tmp = '\0';
		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
	}

	if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
		int mtu;

		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
			     &vsi_seid, &mtu);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "change_mtu: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
								mtu);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "change_mtu called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "set_rx_mode: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "set_rx_mode called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
				 vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
				 vsi_seid);
		} else {
			for (i = 0; i < vsi->num_q_vectors; i++)
				napi_schedule(&vsi->q_vectors[i]->napi);
			dev_info(&pf->pdev->dev, "napi called\n");
		}
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
			 i40e_dbg_netdev_ops_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
		dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
	}
netdev_ops_write_done:
	return count;
}

static const struct file_operations i40e_dbg_netdev_ops_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_netdev_ops_read,
	.write = i40e_dbg_netdev_ops_write,
};

/**
 * i40e_dbg_pf_init - setup the debugfs directory for the PF
 * @pf: the PF that is starting up
 **/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
	const char *name = pci_name(pf->pdev);

	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);

	debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_command_fops);

	debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_netdev_ops_fops);
}

/**
 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
 * @pf: the PF that is stopping
 **/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
	debugfs_remove_recursive(pf->i40e_dbg_pf);
	pf->i40e_dbg_pf = NULL;
}

/**
 * i40e_dbg_init - start up debugfs for the driver
 **/
void i40e_dbg_init(void)
{
	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
	if (IS_ERR(i40e_dbg_root))
		pr_info("init of debugfs failed\n");
}

/**
 * i40e_dbg_exit - clean out the driver's debugfs entries
 **/
void i40e_dbg_exit(void)
{
	debugfs_remove_recursive(i40e_dbg_root);
	i40e_dbg_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */