// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifdef CONFIG_DEBUG_FS

#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include "i40e.h"
#include "i40e_virtchnl_pf.h"

static struct dentry *i40e_dbg_root;

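/* ring flavors that can be selected with the "dump desc rx|tx|xdp" command */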
enum ring_type {
	RING_TYPE_RX,
	RING_TYPE_TX,
	RING_TYPE_XDP
};

/**
 * i40e_dbg_find_vsi - searches for the vsi with the given seid
 * @pf: the PF structure to search for the vsi
 * @seid: seid of the vsi it is searching for
 **/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
	if (seid < 0) {
		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);

		return NULL;
	}

	return i40e_pf_get_vsi_by_seid(pf, seid);
}

/**************************************************************
 * command
 * The command entry in debugfs is for giving the driver commands
 * to be executed - these may be for changing the internal switch
 * setup, adding or removing filters, or other things. Many of
 * these will be useful for some forms of unit testing.
 **************************************************************/

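/* human-readable names indexed by a MAC filter's ->state value */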
static char *i40e_filter_state_string[] = {
	"INVALID",
	"NEW",
	"ACTIVE",
	"FAILED",
	"REMOVE",
	"NEW_SYNC",
};

/**
 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
 * @pf: the i40e_pf created in command write
 * @seid: the seid the user put in
 **/
static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
{
60 struct rtnl_link_stats64 *nstat;
61 struct i40e_mac_filter *f;
62 struct i40e_vsi *vsi;
63 int i, bkt;
64
65 vsi = i40e_dbg_find_vsi(pf, seid);
66 if (!vsi) {
67 dev_info(&pf->pdev->dev,
68 "dump %d: seid not found\n", seid);
69 return;
70 }
71 dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
72 if (vsi->netdev) {
73 struct net_device *nd = vsi->netdev;
74
75 dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
76 nd->name, nd->state, nd->flags);
77 dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
78 (unsigned long int)nd->features);
79 dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
80 (unsigned long int)nd->hw_features);
81 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
82 (unsigned long int)nd->vlan_features);
83 }
84 dev_info(&pf->pdev->dev,
85 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
86 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
87 for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
88 dev_info(&pf->pdev->dev,
89 " state[%d] = %08lx\n",
90 i, vsi->state[i]);
91 if (vsi->type == I40E_VSI_MAIN)
92 dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
93 pf->hw.mac.addr,
94 pf->hw.mac.port_addr);
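	/* walk the VSI's MAC filter hash and print every filter entry */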
95 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
96 dev_info(&pf->pdev->dev,
97 " mac_filter_hash: %pM vid=%d, state %s\n",
98 f->macaddr, f->vlan,
99 i40e_filter_state_string[f->state]);
100 }
101 dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
102 vsi->active_filters, vsi->promisc_threshold,
103 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
104 "ON" : "OFF"));
105 nstat = i40e_get_vsi_stats_struct(vsi);
106 dev_info(&pf->pdev->dev,
107 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
108 (unsigned long int)nstat->rx_packets,
109 (unsigned long int)nstat->rx_bytes,
110 (unsigned long int)nstat->rx_errors,
111 (unsigned long int)nstat->rx_dropped);
112 dev_info(&pf->pdev->dev,
113 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
114 (unsigned long int)nstat->tx_packets,
115 (unsigned long int)nstat->tx_bytes,
116 (unsigned long int)nstat->tx_errors,
117 (unsigned long int)nstat->tx_dropped);
118 dev_info(&pf->pdev->dev,
119 " net_stats: multicast = %lu, collisions = %lu\n",
120 (unsigned long int)nstat->multicast,
121 (unsigned long int)nstat->collisions);
122 dev_info(&pf->pdev->dev,
123 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
124 (unsigned long int)nstat->rx_length_errors,
125 (unsigned long int)nstat->rx_over_errors,
126 (unsigned long int)nstat->rx_crc_errors);
127 dev_info(&pf->pdev->dev,
128 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
129 (unsigned long int)nstat->rx_frame_errors,
130 (unsigned long int)nstat->rx_fifo_errors,
131 (unsigned long int)nstat->rx_missed_errors);
132 dev_info(&pf->pdev->dev,
133 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
134 (unsigned long int)nstat->tx_aborted_errors,
135 (unsigned long int)nstat->tx_carrier_errors,
136 (unsigned long int)nstat->tx_fifo_errors);
137 dev_info(&pf->pdev->dev,
138 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
139 (unsigned long int)nstat->tx_heartbeat_errors,
140 (unsigned long int)nstat->tx_window_errors);
141 dev_info(&pf->pdev->dev,
142 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
143 (unsigned long int)nstat->rx_compressed,
144 (unsigned long int)nstat->tx_compressed);
145 dev_info(&pf->pdev->dev,
146 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
147 (unsigned long int)vsi->net_stats_offsets.rx_packets,
148 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
149 (unsigned long int)vsi->net_stats_offsets.rx_errors,
150 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
151 dev_info(&pf->pdev->dev,
152 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
153 (unsigned long int)vsi->net_stats_offsets.tx_packets,
154 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
155 (unsigned long int)vsi->net_stats_offsets.tx_errors,
156 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
157 dev_info(&pf->pdev->dev,
158 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
159 (unsigned long int)vsi->net_stats_offsets.multicast,
160 (unsigned long int)vsi->net_stats_offsets.collisions);
161 dev_info(&pf->pdev->dev,
162 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
163 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
164 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
165 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
166 dev_info(&pf->pdev->dev,
167 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
168 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
169 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
170 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
171 dev_info(&pf->pdev->dev,
172 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
173 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
174 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
175 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
176 dev_info(&pf->pdev->dev,
177 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
178 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
179 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
180 dev_info(&pf->pdev->dev,
181 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
182 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
183 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
184 dev_info(&pf->pdev->dev,
185 " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
186 vsi->tx_restart, vsi->tx_busy,
187 vsi->rx_buf_failed, vsi->rx_page_failed);
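	/* queue ring pointers are sampled under RCU with READ_ONCE() so the
	 * dump tolerates rings being replaced concurrently
	 */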
188 rcu_read_lock();
189 for (i = 0; i < vsi->num_queue_pairs; i++) {
190 struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
191
192 if (!rx_ring)
193 continue;
194
195 dev_info(&pf->pdev->dev,
196 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
197 i, *rx_ring->state,
198 rx_ring->queue_index,
199 rx_ring->reg_idx);
200 dev_info(&pf->pdev->dev,
201 " rx_rings[%i]: rx_buf_len = %d\n",
202 i, rx_ring->rx_buf_len);
203 dev_info(&pf->pdev->dev,
204 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
205 i,
206 rx_ring->next_to_use,
207 rx_ring->next_to_clean,
208 rx_ring->ring_active);
209 dev_info(&pf->pdev->dev,
210 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
211 i, rx_ring->stats.packets,
212 rx_ring->stats.bytes,
213 rx_ring->rx_stats.non_eop_descs);
214 dev_info(&pf->pdev->dev,
215 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
216 i,
217 rx_ring->rx_stats.alloc_page_failed,
218 rx_ring->rx_stats.alloc_buff_failed);
219 dev_info(&pf->pdev->dev,
220 " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
221 i,
222 rx_ring->rx_stats.page_reuse_count);
223 dev_info(&pf->pdev->dev,
224 " rx_rings[%i]: size = %i\n",
225 i, rx_ring->size);
226 dev_info(&pf->pdev->dev,
227 " rx_rings[%i]: itr_setting = %d (%s)\n",
228 i, rx_ring->itr_setting,
229 ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
230 }
231 for (i = 0; i < vsi->num_queue_pairs; i++) {
232 struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
233
234 if (!tx_ring)
235 continue;
236
237 dev_info(&pf->pdev->dev,
238 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
239 i, *tx_ring->state,
240 tx_ring->queue_index,
241 tx_ring->reg_idx);
242 dev_info(&pf->pdev->dev,
243 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
244 i,
245 tx_ring->next_to_use,
246 tx_ring->next_to_clean,
247 tx_ring->ring_active);
248 dev_info(&pf->pdev->dev,
249 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
250 i, tx_ring->stats.packets,
251 tx_ring->stats.bytes,
252 tx_ring->tx_stats.restart_queue);
253 dev_info(&pf->pdev->dev,
254 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
255 i,
256 tx_ring->tx_stats.tx_busy,
257 tx_ring->tx_stats.tx_done_old,
258 tx_ring->tx_stats.tx_stopped);
259 dev_info(&pf->pdev->dev,
260 " tx_rings[%i]: size = %i\n",
261 i, tx_ring->size);
262 dev_info(&pf->pdev->dev,
263 " tx_rings[%i]: DCB tc = %d\n",
264 i, tx_ring->dcb_tc);
265 dev_info(&pf->pdev->dev,
266 " tx_rings[%i]: itr_setting = %d (%s)\n",
267 i, tx_ring->itr_setting,
268 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
269 }
270 if (i40e_enabled_xdp_vsi(vsi)) {
271 for (i = 0; i < vsi->num_queue_pairs; i++) {
272 struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
273
274 if (!xdp_ring)
275 continue;
276
277 dev_info(&pf->pdev->dev,
278 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
279 i, *xdp_ring->state,
280 xdp_ring->queue_index,
281 xdp_ring->reg_idx);
282 dev_info(&pf->pdev->dev,
283 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
284 i,
285 xdp_ring->next_to_use,
286 xdp_ring->next_to_clean,
287 xdp_ring->ring_active);
288 dev_info(&pf->pdev->dev,
289 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
290 i, xdp_ring->stats.packets,
291 xdp_ring->stats.bytes,
292 xdp_ring->tx_stats.restart_queue);
293 dev_info(&pf->pdev->dev,
294 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
295 i,
296 xdp_ring->tx_stats.tx_busy,
297 xdp_ring->tx_stats.tx_done_old);
298 dev_info(&pf->pdev->dev,
299 " xdp_rings[%i]: size = %i\n",
300 i, xdp_ring->size);
301 dev_info(&pf->pdev->dev,
302 " xdp_rings[%i]: DCB tc = %d\n",
303 i, xdp_ring->dcb_tc);
304 dev_info(&pf->pdev->dev,
305 " xdp_rings[%i]: itr_setting = %d (%s)\n",
306 i, xdp_ring->itr_setting,
307 ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
308 "dynamic" : "fixed");
309 }
310 }
311 rcu_read_unlock();
312 dev_info(&pf->pdev->dev,
313 " work_limit = %d\n",
314 vsi->work_limit);
315 dev_info(&pf->pdev->dev,
316 " max_frame = %d, rx_buf_len = %d dtype = %d\n",
317 vsi->max_frame, vsi->rx_buf_len, 0);
318 dev_info(&pf->pdev->dev,
319 " num_q_vectors = %i, base_vector = %i\n",
320 vsi->num_q_vectors, vsi->base_vector);
321 dev_info(&pf->pdev->dev,
322 " seid = %d, id = %d, uplink_seid = %d\n",
323 vsi->seid, vsi->id, vsi->uplink_seid);
324 dev_info(&pf->pdev->dev,
325 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
326 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
327 vsi->num_rx_desc);
328 dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
329 if (vsi->type == I40E_VSI_SRIOV)
330 dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
331 dev_info(&pf->pdev->dev,
332 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
333 vsi->info.valid_sections, vsi->info.switch_id);
334 dev_info(&pf->pdev->dev,
335 " info: sw_reserved[] = 0x%02x 0x%02x\n",
336 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
337 dev_info(&pf->pdev->dev,
338 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
339 vsi->info.sec_flags, vsi->info.sec_reserved);
340 dev_info(&pf->pdev->dev,
341 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
342 vsi->info.pvid, vsi->info.fcoe_pvid,
343 vsi->info.port_vlan_flags);
344 dev_info(&pf->pdev->dev,
345 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
346 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
347 vsi->info.pvlan_reserved[2]);
348 dev_info(&pf->pdev->dev,
349 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
350 vsi->info.ingress_table, vsi->info.egress_table);
351 dev_info(&pf->pdev->dev,
352 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
353 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
354 vsi->info.cas_pv_reserved);
355 dev_info(&pf->pdev->dev,
356 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
357 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
358 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
359 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
360 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
361 dev_info(&pf->pdev->dev,
362 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
363 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
364 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
365 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
366 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
367 dev_info(&pf->pdev->dev,
368 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
369 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
370 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
371 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
372 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
373 dev_info(&pf->pdev->dev,
374 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
375 vsi->info.queueing_opt_flags,
376 vsi->info.queueing_opt_reserved[0],
377 vsi->info.queueing_opt_reserved[1],
378 vsi->info.queueing_opt_reserved[2]);
379 dev_info(&pf->pdev->dev,
380 " info: up_enable_bits = 0x%02x\n",
381 vsi->info.up_enable_bits);
382 dev_info(&pf->pdev->dev,
383 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
384 vsi->info.sched_reserved, vsi->info.outer_up_table);
385 dev_info(&pf->pdev->dev,
386 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
387 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
388 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
389 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
390 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
391 dev_info(&pf->pdev->dev,
392 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
393 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
394 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
395 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
396 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
397 dev_info(&pf->pdev->dev,
398 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
399 vsi->info.stat_counter_idx, vsi->info.sched_id);
400 dev_info(&pf->pdev->dev,
401 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
402 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
403 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
404 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
405 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
406 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
407 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
408 dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
409 dev_info(&pf->pdev->dev,
410 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
411 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
412 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
413 dev_info(&pf->pdev->dev,
414 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
415 i, vsi->tc_config.tc_info[i].qoffset,
416 vsi->tc_config.tc_info[i].qcount,
417 vsi->tc_config.tc_info[i].netdev_tc);
418 }
419 dev_info(&pf->pdev->dev,
420 " bw: bw_limit = %d, bw_max_quanta = %d\n",
421 vsi->bw_limit, vsi->bw_max_quanta);
422 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
423 dev_info(&pf->pdev->dev,
424 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
425 i, vsi->bw_ets_share_credits[i],
426 vsi->bw_ets_limit_credits[i],
427 vsi->bw_ets_max_quanta[i]);
428 }
429 }
430
/**
 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
{
	struct i40e_adminq_ring *ring;
	struct i40e_hw *hw = &pf->hw;
	char hdr[32];
	int i;

	snprintf(hdr, sizeof(hdr), "%s %s: ",
		 dev_driver_string(&pf->pdev->dev),
		 dev_name(&pf->pdev->dev));

	/* first the send (command) ring, then the receive (event) ring */
	dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
	ring = &(hw->aq.asq);
	for (i = 0; i < ring->count; i++) {
		struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);

		dev_info(&pf->pdev->dev,
			 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
			 i, d->flags, d->opcode, d->datalen, d->retval,
			 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
	}

	dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
	ring = &(hw->aq.arq);
	for (i = 0; i < ring->count; i++) {
		struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);

		dev_info(&pf->pdev->dev,
			 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
			 i, d->flags, d->opcode, d->datalen, d->retval,
			 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
	}
}

/**
 * i40e_dbg_dump_desc - handles dump desc write into command datum
 * @cnt: number of arguments that the user supplied
 * @vsi_seid: vsi id entered by user
 * @ring_id: ring id entered by user
 * @desc_n: descriptor number entered by user
 * @pf: the i40e_pf created in command write
 * @type: enum describing whether ring is RX, TX or XDP
 **/
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
			       struct i40e_pf *pf, enum ring_type type)
{
486 bool is_rx_ring = type == RING_TYPE_RX;
487 struct i40e_tx_desc *txd;
488 union i40e_rx_desc *rxd;
489 struct i40e_ring *ring;
490 struct i40e_vsi *vsi;
491 int i;
492
493 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
494 if (!vsi) {
495 dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
496 return;
497 }
498 if (vsi->type != I40E_VSI_MAIN &&
499 vsi->type != I40E_VSI_FDIR &&
500 vsi->type != I40E_VSI_VMDQ2) {
501 dev_info(&pf->pdev->dev,
502 "vsi %d type %d descriptor rings not available\n",
503 vsi_seid, vsi->type);
504 return;
505 }
506 if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
507 dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
508 return;
509 }
510 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
511 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
512 return;
513 }
514 if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
515 dev_info(&pf->pdev->dev,
516 "descriptor rings have not been allocated for vsi %d\n",
517 vsi_seid);
518 return;
519 }
520
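	/* operate on a kmemdup() snapshot of the ring structure; the copy is
	 * freed at the end of this function
	 */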
521 switch (type) {
522 case RING_TYPE_RX:
523 ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
524 break;
525 case RING_TYPE_TX:
526 ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
527 break;
528 case RING_TYPE_XDP:
529 ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
530 break;
531 default:
532 ring = NULL;
533 break;
534 }
535 if (!ring)
536 return;
537
538 if (cnt == 2) {
539 switch (type) {
540 case RING_TYPE_RX:
541 dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
542 break;
543 case RING_TYPE_TX:
544 dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
545 break;
546 case RING_TYPE_XDP:
547 dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
548 break;
549 }
550 for (i = 0; i < ring->count; i++) {
551 if (!is_rx_ring) {
552 txd = I40E_TX_DESC(ring, i);
553 dev_info(&pf->pdev->dev,
554 " d[%03x] = 0x%016llx 0x%016llx\n",
555 i, txd->buffer_addr,
556 txd->cmd_type_offset_bsz);
557 } else {
558 rxd = I40E_RX_DESC(ring, i);
559 dev_info(&pf->pdev->dev,
560 " d[%03x] = 0x%016llx 0x%016llx\n",
561 i, rxd->read.pkt_addr,
562 rxd->read.hdr_addr);
563 }
564 }
565 } else if (cnt == 3) {
566 if (desc_n >= ring->count || desc_n < 0) {
567 dev_info(&pf->pdev->dev,
568 "descriptor %d not found\n", desc_n);
569 goto out;
570 }
571 if (!is_rx_ring) {
572 txd = I40E_TX_DESC(ring, desc_n);
573 dev_info(&pf->pdev->dev,
574 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
575 vsi_seid, ring_id, desc_n,
576 txd->buffer_addr, txd->cmd_type_offset_bsz);
577 } else {
578 rxd = I40E_RX_DESC(ring, desc_n);
579 dev_info(&pf->pdev->dev,
580 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
581 vsi_seid, ring_id, desc_n,
582 rxd->read.pkt_addr, rxd->read.hdr_addr);
583 }
584 } else {
585 dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
586 }
587
588 out:
589 kfree(ring);
590 }
591
/**
 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	int i;

	i40e_pf_for_each_vsi(pf, i, vsi)
		dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
}

/**
 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
 * @pf: the i40e_pf created in command write
 * @estats: the eth stats structure to be dumped
 **/
static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
				    struct i40e_eth_stats *estats)
{
	dev_info(&pf->pdev->dev, " ethstats:\n");
	dev_info(&pf->pdev->dev,
		 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
		 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
	dev_info(&pf->pdev->dev,
		 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
		 estats->rx_broadcast, estats->rx_discards);
	dev_info(&pf->pdev->dev,
		 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
		 estats->rx_unknown_protocol, estats->tx_bytes);
	dev_info(&pf->pdev->dev,
		 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
		 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
	dev_info(&pf->pdev->dev,
		 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
		 estats->tx_discards, estats->tx_errors);
}

/**
 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
 * @pf: the i40e_pf created in command write
 * @seid: the seid the user put in
 **/
static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
{
	struct i40e_veb *veb;

	veb = i40e_pf_get_veb_by_seid(pf, seid);
	if (!veb) {
		dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
		return;
	}
	dev_info(&pf->pdev->dev,
		 "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
		 veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
		 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	i40e_dbg_dump_eth_stats(pf, &veb->stats);
}

/**
 * i40e_dbg_dump_veb_all - dumps all known veb's stats
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
{
	struct i40e_veb *veb;
	int i;

	i40e_pf_for_each_veb(pf, i, veb)
		i40e_dbg_dump_veb_seid(pf, veb->seid);
}

/**
 * i40e_dbg_dump_vf - dump VF info
 * @pf: the i40e_pf created in command write
 * @vf_id: the vf_id from the user
 **/
static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vf *vf;
	struct i40e_vsi *vsi;

	if (!pf->num_alloc_vfs) {
		dev_info(&pf->pdev->dev, "no VFs allocated\n");
	} else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
		vf = &pf->vf[vf_id];
		vsi = pf->vsi[vf->lan_vsi_idx];
		dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
			 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
		dev_info(&pf->pdev->dev, " num MDD=%lld\n",
			 vf->mdd_tx_events.count + vf->mdd_rx_events.count);
	} else {
		dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
	}
}

/**
 * i40e_dbg_dump_vf_all - dump VF info for all VFs
 * @pf: the i40e_pf created in command write
 **/
static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
{
	int i;

	if (!pf->num_alloc_vfs)
		dev_info(&pf->pdev->dev, "no VFs enabled!\n");
	else
		for (i = 0; i < pf->num_alloc_vfs; i++)
			i40e_dbg_dump_vf(pf, i);
}

/**
 * i40e_dbg_command_write - write into command datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_command_write(struct file *filp,
				      const char __user *buffer,
				      size_t count, loff_t *ppos)
{
715 struct i40e_pf *pf = filp->private_data;
716 char *cmd_buf, *cmd_buf_tmp;
717 int bytes_not_copied;
718 struct i40e_vsi *vsi;
719 int vsi_seid;
720 int veb_seid;
721 int vf_id;
722 int cnt;
723
724 /* don't allow partial writes */
725 if (*ppos != 0)
726 return 0;
727
728 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
729 if (!cmd_buf)
730 return count;
731 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
732 if (bytes_not_copied) {
733 kfree(cmd_buf);
734 return -EFAULT;
735 }
736 cmd_buf[count] = '\0';
737
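	/* keep only the first line of input; anything after a newline is ignored */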
738 cmd_buf_tmp = strchr(cmd_buf, '\n');
739 if (cmd_buf_tmp) {
740 *cmd_buf_tmp = '\0';
741 count = cmd_buf_tmp - cmd_buf + 1;
742 }
743
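	/* dispatch on the command verb; each branch below implements one debugfs command */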
744 if (strncmp(cmd_buf, "add vsi", 7) == 0) {
745 vsi_seid = -1;
746 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
747 if (cnt == 0) {
748 /* default to PF VSI */
749 vsi = i40e_pf_get_main_vsi(pf);
750 vsi_seid = vsi->seid;
751 } else if (vsi_seid < 0) {
752 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
753 vsi_seid);
754 goto command_write_done;
755 }
756
757 /* By default we are in VEPA mode, if this is the first VF/VMDq
758 * VSI to be added switch to VEB mode.
759 */
760 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
761 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
762 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
763 }
764
765 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
766 if (vsi)
767 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
768 vsi->seid, vsi->uplink_seid);
769 else
770 dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
771
772 } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
773 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
774 if (cnt != 1) {
775 dev_info(&pf->pdev->dev,
776 "del vsi: bad command string, cnt=%d\n",
777 cnt);
778 goto command_write_done;
779 }
780 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
781 if (!vsi) {
782 dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
783 vsi_seid);
784 goto command_write_done;
785 }
786
787 dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
788 i40e_vsi_release(vsi);
789
790 } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
791 struct i40e_veb *veb;
792 u8 enabled_tc = 0x1;
793 int uplink_seid;
794
795 cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
796 if (cnt == 0) {
797 uplink_seid = 0;
798 vsi_seid = 0;
799 } else if (cnt != 2) {
800 dev_info(&pf->pdev->dev,
801 "add relay: bad command string, cnt=%d\n",
802 cnt);
803 goto command_write_done;
804 } else if (uplink_seid < 0) {
805 dev_info(&pf->pdev->dev,
806 "add relay %d: bad uplink seid\n",
807 uplink_seid);
808 goto command_write_done;
809 }
810
811 if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
812 dev_info(&pf->pdev->dev,
813 "add relay: relay uplink %d not found\n",
814 uplink_seid);
815 goto command_write_done;
816 } else if (uplink_seid) {
817 vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
818 if (!vsi) {
819 dev_info(&pf->pdev->dev,
820 "add relay: VSI %d not found\n",
821 vsi_seid);
822 goto command_write_done;
823 }
824 enabled_tc = vsi->tc_config.enabled_tc;
825 } else if (vsi_seid) {
826 dev_info(&pf->pdev->dev,
827 "add relay: VSI must be 0 for floating relay\n");
828 goto command_write_done;
829 }
830
831 veb = i40e_veb_setup(pf, uplink_seid, vsi_seid, enabled_tc);
832 if (veb)
833 dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
834 else
835 dev_info(&pf->pdev->dev, "add relay failed\n");
836
837 } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
838 struct i40e_veb *veb;
839 int i;
840
841 cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
842 if (cnt != 1) {
843 dev_info(&pf->pdev->dev,
844 "del relay: bad command string, cnt=%d\n",
845 cnt);
846 goto command_write_done;
847 } else if (veb_seid < 0) {
848 dev_info(&pf->pdev->dev,
849 "del relay %d: bad relay seid\n", veb_seid);
850 goto command_write_done;
851 }
852
853 /* find the veb */
854 i40e_pf_for_each_veb(pf, i, veb)
855 if (veb->seid == veb_seid)
856 break;
857
858 if (i >= I40E_MAX_VEB) {
859 dev_info(&pf->pdev->dev,
860 "del relay: relay %d not found\n", veb_seid);
861 goto command_write_done;
862 }
863
864 dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
865 i40e_veb_release(veb);
866 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
867 unsigned int v;
868 int ret;
869 u16 vid;
870
871 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
872 if (cnt != 2) {
873 dev_info(&pf->pdev->dev,
874 "add pvid: bad command string, cnt=%d\n", cnt);
875 goto command_write_done;
876 }
877
878 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
879 if (!vsi) {
880 dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
881 vsi_seid);
882 goto command_write_done;
883 }
884
885 vid = v;
886 ret = i40e_vsi_add_pvid(vsi, vid);
887 if (!ret)
888 dev_info(&pf->pdev->dev,
889 "add pvid: %d added to VSI %d\n",
890 vid, vsi_seid);
891 else
892 dev_info(&pf->pdev->dev,
893 "add pvid: %d to VSI %d failed, ret=%d\n",
894 vid, vsi_seid, ret);
895
896 } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
897
898 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
899 if (cnt != 1) {
900 dev_info(&pf->pdev->dev,
901 "del pvid: bad command string, cnt=%d\n",
902 cnt);
903 goto command_write_done;
904 }
905
906 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
907 if (!vsi) {
908 dev_info(&pf->pdev->dev,
909 "del pvid: VSI %d not found\n", vsi_seid);
910 goto command_write_done;
911 }
912
913 i40e_vsi_remove_pvid(vsi);
914 dev_info(&pf->pdev->dev,
915 "del pvid: removed from VSI %d\n", vsi_seid);
916
917 } else if (strncmp(cmd_buf, "dump", 4) == 0) {
918 if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
919 i40e_fetch_switch_configuration(pf, true);
920 } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
921 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
922 if (cnt > 0)
923 i40e_dbg_dump_vsi_seid(pf, vsi_seid);
924 else
925 i40e_dbg_dump_vsi_no_seid(pf);
926 } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
927 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
928 if (cnt > 0)
929 i40e_dbg_dump_veb_seid(pf, vsi_seid);
930 else
931 i40e_dbg_dump_veb_all(pf);
932 } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
933 cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
934 if (cnt > 0)
935 i40e_dbg_dump_vf(pf, vf_id);
936 else
937 i40e_dbg_dump_vf_all(pf);
938 } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
939 int ring_id, desc_n;
940 if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
941 cnt = sscanf(&cmd_buf[12], "%i %i %i",
942 &vsi_seid, &ring_id, &desc_n);
943 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
944 desc_n, pf, RING_TYPE_RX);
945 } else if (strncmp(&cmd_buf[10], "tx", 2)
946 == 0) {
947 cnt = sscanf(&cmd_buf[12], "%i %i %i",
948 &vsi_seid, &ring_id, &desc_n);
949 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
950 desc_n, pf, RING_TYPE_TX);
951 } else if (strncmp(&cmd_buf[10], "xdp", 3)
952 == 0) {
953 cnt = sscanf(&cmd_buf[13], "%i %i %i",
954 &vsi_seid, &ring_id, &desc_n);
955 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
956 desc_n, pf, RING_TYPE_XDP);
957 } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
958 i40e_dbg_dump_aq_desc(pf);
959 } else {
960 dev_info(&pf->pdev->dev,
961 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
962 dev_info(&pf->pdev->dev,
963 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
964 dev_info(&pf->pdev->dev,
965 "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
966 dev_info(&pf->pdev->dev, "dump desc aq\n");
967 }
968 } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
969 dev_info(&pf->pdev->dev,
970 "core reset count: %d\n", pf->corer_count);
971 dev_info(&pf->pdev->dev,
972 "global reset count: %d\n", pf->globr_count);
973 dev_info(&pf->pdev->dev,
974 "emp reset count: %d\n", pf->empr_count);
975 dev_info(&pf->pdev->dev,
976 "pf reset count: %d\n", pf->pfr_count);
977 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
978 struct i40e_aqc_query_port_ets_config_resp *bw_data;
979 struct i40e_dcbx_config *cfg =
980 &pf->hw.local_dcbx_config;
981 struct i40e_dcbx_config *r_cfg =
982 &pf->hw.remote_dcbx_config;
983 int i, ret;
984 u16 switch_id;
985
986 bw_data = kzalloc(sizeof(
987 struct i40e_aqc_query_port_ets_config_resp),
988 GFP_KERNEL);
989 if (!bw_data) {
990 ret = -ENOMEM;
991 goto command_write_done;
992 }
993
994 vsi = i40e_pf_get_main_vsi(pf);
995 switch_id =
996 le16_to_cpu(vsi->info.switch_id) &
997 I40E_AQ_VSI_SW_ID_MASK;
998
999 ret = i40e_aq_query_port_ets_config(&pf->hw,
1000 switch_id,
1001 bw_data, NULL);
1002 if (ret) {
1003 dev_info(&pf->pdev->dev,
1004 "Query Port ETS Config AQ command failed =0x%x\n",
1005 pf->hw.aq.asq_last_status);
1006 kfree(bw_data);
1007 bw_data = NULL;
1008 goto command_write_done;
1009 }
1010 dev_info(&pf->pdev->dev,
1011 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
1012 bw_data->tc_valid_bits,
1013 bw_data->tc_strict_priority_bits,
1014 le16_to_cpu(bw_data->tc_bw_max[0]),
1015 le16_to_cpu(bw_data->tc_bw_max[1]));
1016 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1017 dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
1018 bw_data->tc_bw_share_credits[i],
1019 le16_to_cpu(bw_data->tc_bw_limits[i]));
1020 }
1021
1022 kfree(bw_data);
1023 bw_data = NULL;
1024
1025 dev_info(&pf->pdev->dev,
1026 "port dcbx_mode=%d\n", cfg->dcbx_mode);
1027 dev_info(&pf->pdev->dev,
1028 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1029 cfg->etscfg.willing, cfg->etscfg.cbs,
1030 cfg->etscfg.maxtcs);
1031 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1032 dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1033 i, cfg->etscfg.prioritytable[i],
1034 cfg->etscfg.tcbwtable[i],
1035 cfg->etscfg.tsatable[i]);
1036 }
1037 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1038 dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1039 i, cfg->etsrec.prioritytable[i],
1040 cfg->etsrec.tcbwtable[i],
1041 cfg->etsrec.tsatable[i]);
1042 }
1043 dev_info(&pf->pdev->dev,
1044 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1045 cfg->pfc.willing, cfg->pfc.mbc,
1046 cfg->pfc.pfccap, cfg->pfc.pfcenable);
1047 dev_info(&pf->pdev->dev,
1048 "port app_table: num_apps=%d\n", cfg->numapps);
1049 for (i = 0; i < cfg->numapps; i++) {
1050 dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1051 i, cfg->app[i].priority,
1052 cfg->app[i].selector,
1053 cfg->app[i].protocolid);
1054 }
1055 /* Peer TLV DCBX data */
1056 dev_info(&pf->pdev->dev,
1057 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1058 r_cfg->etscfg.willing,
1059 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
1060 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1061 dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1062 i, r_cfg->etscfg.prioritytable[i],
1063 r_cfg->etscfg.tcbwtable[i],
1064 r_cfg->etscfg.tsatable[i]);
1065 }
1066 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1067 dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1068 i, r_cfg->etsrec.prioritytable[i],
1069 r_cfg->etsrec.tcbwtable[i],
1070 r_cfg->etsrec.tsatable[i]);
1071 }
1072 dev_info(&pf->pdev->dev,
1073 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1074 r_cfg->pfc.willing,
1075 r_cfg->pfc.mbc,
1076 r_cfg->pfc.pfccap,
1077 r_cfg->pfc.pfcenable);
1078 dev_info(&pf->pdev->dev,
1079 "remote port app_table: num_apps=%d\n",
1080 r_cfg->numapps);
1081 for (i = 0; i < r_cfg->numapps; i++) {
1082 dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1083 i, r_cfg->app[i].priority,
1084 r_cfg->app[i].selector,
1085 r_cfg->app[i].protocolid);
1086 }
1087 } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
1088 int cluster_id, table_id;
1089 int index, ret;
1090 u16 buff_len = 4096;
1091 u32 next_index;
1092 u8 next_table;
1093 u8 *buff;
1094 u16 rlen;
1095
1096 cnt = sscanf(&cmd_buf[18], "%i %i %i",
1097 &cluster_id, &table_id, &index);
1098 if (cnt != 3) {
1099 dev_info(&pf->pdev->dev,
1100 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1101 goto command_write_done;
1102 }
1103
1104 dev_info(&pf->pdev->dev,
1105 "AQ debug dump fwdata params %x %x %x %x\n",
1106 cluster_id, table_id, index, buff_len);
1107 buff = kzalloc(buff_len, GFP_KERNEL);
1108 if (!buff)
1109 goto command_write_done;
1110
1111 ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
1112 index, buff_len, buff, &rlen,
1113 &next_table, &next_index,
1114 NULL);
1115 if (ret) {
1116 dev_info(&pf->pdev->dev,
1117 "debug dump fwdata AQ Failed %d 0x%x\n",
1118 ret, pf->hw.aq.asq_last_status);
1119 kfree(buff);
1120 buff = NULL;
1121 goto command_write_done;
1122 }
1123 dev_info(&pf->pdev->dev,
1124 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
1125 rlen, next_table, next_index);
1126 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1127 DUMP_PREFIX_OFFSET, 16, 1,
1128 buff, rlen, true);
1129 kfree(buff);
1130 buff = NULL;
1131 } else {
1132 dev_info(&pf->pdev->dev,
1133 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
1134 dev_info(&pf->pdev->dev, "dump switch\n");
1135 dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
1136 dev_info(&pf->pdev->dev, "dump reset stats\n");
1137 dev_info(&pf->pdev->dev, "dump port\n");
1138 dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
1139 dev_info(&pf->pdev->dev,
1140 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1141 }
1142 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1143 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
1144 i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
1145
1146 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1147 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
1148 i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
1149
1150 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1151 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
1152 i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
1153
1154 } else if (strncmp(cmd_buf, "read", 4) == 0) {
1155 u32 address;
1156 u32 value;
1157
1158 cnt = sscanf(&cmd_buf[4], "%i", &address);
1159 if (cnt != 1) {
1160 dev_info(&pf->pdev->dev, "read <reg>\n");
1161 goto command_write_done;
1162 }
1163
1164 /* check the range on address */
1165 if (address > (pf->ioremap_len - sizeof(u32))) {
1166 dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
1167 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1168 goto command_write_done;
1169 }
1170
1171 value = rd32(&pf->hw, address);
1172 dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
1173 address, value);
1174
1175 } else if (strncmp(cmd_buf, "write", 5) == 0) {
1176 u32 address, value;
1177
1178 cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
1179 if (cnt != 2) {
1180 dev_info(&pf->pdev->dev, "write <reg> <value>\n");
1181 goto command_write_done;
1182 }
1183
1184 /* check the range on address */
1185 if (address > (pf->ioremap_len - sizeof(u32))) {
1186 dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
1187 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1188 goto command_write_done;
1189 }
1190 wr32(&pf->hw, address, value);
1191 value = rd32(&pf->hw, address);
1192 dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
1193 address, value);
1194 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
1195 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
1196 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1197 if (cnt == 0) {
1198 int i;
1199
1200 i40e_pf_for_each_vsi(pf, i, vsi)
1201 i40e_vsi_reset_stats(vsi);
1202 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
1203 } else if (cnt == 1) {
1204 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1205 if (!vsi) {
1206 dev_info(&pf->pdev->dev,
1207 "clear_stats vsi: bad vsi %d\n",
1208 vsi_seid);
1209 goto command_write_done;
1210 }
1211 i40e_vsi_reset_stats(vsi);
1212 dev_info(&pf->pdev->dev,
1213 "vsi clear stats called for vsi %d\n",
1214 vsi_seid);
1215 } else {
1216 dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
1217 }
1218 } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
1219 if (pf->hw.partition_id == 1) {
1220 i40e_pf_reset_stats(pf);
1221 dev_info(&pf->pdev->dev, "port stats cleared\n");
1222 } else {
1223 dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
1224 }
1225 } else {
1226 dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
1227 }
1228 } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
1229 struct libie_aq_desc *desc;
1230 int ret;
1231
1232 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
1233 if (!desc)
1234 goto command_write_done;
1235 cnt = sscanf(&cmd_buf[11],
1236 "%hi %hi %hi %hi %i %i %i %i %i %i",
1237 &desc->flags,
1238 &desc->opcode, &desc->datalen, &desc->retval,
1239 &desc->cookie_high, &desc->cookie_low,
1240 &desc->params.generic.param0,
1241 &desc->params.generic.param1,
1242 &desc->params.generic.addr_high,
1243 &desc->params.generic.addr_low);
1244 if (cnt != 10) {
1245 dev_info(&pf->pdev->dev,
1246 "send aq_cmd: bad command string, cnt=%d\n",
1247 cnt);
1248 kfree(desc);
1249 desc = NULL;
1250 goto command_write_done;
1251 }
1252 ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
1253 if (!ret) {
1254 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1255 } else if (ret == -EIO) {
1256 dev_info(&pf->pdev->dev,
1257 "AQ command send failed Opcode %x AQ Error: %d\n",
1258 desc->opcode, pf->hw.aq.asq_last_status);
1259 } else {
1260 dev_info(&pf->pdev->dev,
1261 "AQ command send failed Opcode %x Status: %d\n",
1262 desc->opcode, ret);
1263 }
1264 dev_info(&pf->pdev->dev,
1265 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1266 desc->flags, desc->opcode, desc->datalen, desc->retval,
1267 desc->cookie_high, desc->cookie_low,
1268 desc->params.generic.param0,
1269 desc->params.generic.param1,
1270 desc->params.generic.addr_high,
1271 desc->params.generic.addr_low);
1272 kfree(desc);
1273 desc = NULL;
1274 } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
1275 struct libie_aq_desc *desc;
1276 u16 buffer_len;
1277 u8 *buff;
1278 int ret;
1279
1280 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
1281 if (!desc)
1282 goto command_write_done;
1283 cnt = sscanf(&cmd_buf[20],
1284 "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
1285 &desc->flags,
1286 &desc->opcode, &desc->datalen, &desc->retval,
1287 &desc->cookie_high, &desc->cookie_low,
1288 &desc->params.generic.param0,
1289 &desc->params.generic.param1,
1290 &desc->params.generic.addr_high,
1291 &desc->params.generic.addr_low,
1292 &buffer_len);
1293 if (cnt != 11) {
1294 dev_info(&pf->pdev->dev,
1295 "send indirect aq_cmd: bad command string, cnt=%d\n",
1296 cnt);
1297 kfree(desc);
1298 desc = NULL;
1299 goto command_write_done;
1300 }
1301 /* Just stub a buffer big enough in case user messed up */
1302 if (buffer_len == 0)
1303 buffer_len = 1280;
1304
1305 buff = kzalloc(buffer_len, GFP_KERNEL);
1306 if (!buff) {
1307 kfree(desc);
1308 desc = NULL;
1309 goto command_write_done;
1310 }
1311 desc->flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
1312 ret = i40e_asq_send_command(&pf->hw, desc, buff,
1313 buffer_len, NULL);
1314 if (!ret) {
1315 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1316 } else if (ret == -EIO) {
1317 dev_info(&pf->pdev->dev,
1318 "AQ command send failed Opcode %x AQ Error: %d\n",
1319 desc->opcode, pf->hw.aq.asq_last_status);
1320 } else {
1321 dev_info(&pf->pdev->dev,
1322 "AQ command send failed Opcode %x Status: %d\n",
1323 desc->opcode, ret);
1324 }
1325 dev_info(&pf->pdev->dev,
1326 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1327 desc->flags, desc->opcode, desc->datalen, desc->retval,
1328 desc->cookie_high, desc->cookie_low,
1329 desc->params.generic.param0,
1330 desc->params.generic.param1,
1331 desc->params.generic.addr_high,
1332 desc->params.generic.addr_low);
1333 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1334 DUMP_PREFIX_OFFSET, 16, 1,
1335 buff, buffer_len, true);
1336 kfree(buff);
1337 buff = NULL;
1338 kfree(desc);
1339 desc = NULL;
1340 } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
1341 dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
1342 i40e_get_current_fd_count(pf));
1343 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
1344 /* Get main VSI */
1345 struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
1346
1347 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
1348 int ret;
1349
1350 ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
1351 if (ret) {
1352 dev_info(&pf->pdev->dev,
1353 "Stop LLDP AQ command failed =0x%x\n",
1354 pf->hw.aq.asq_last_status);
1355 goto command_write_done;
1356 }
1357 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1358 pf->hw.mac.addr, ETH_P_LLDP, 0,
1359 main_vsi->seid, 0, true, NULL,
1360 NULL);
1361 if (ret) {
1362 dev_info(&pf->pdev->dev,
1363 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
1364 __func__, pf->hw.aq.asq_last_status);
1365 goto command_write_done;
1366 }
1367 #ifdef CONFIG_I40E_DCB
1368 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
1369 DCB_CAP_DCBX_VER_IEEE;
1370 #endif /* CONFIG_I40E_DCB */
1371 } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
1372 int ret;
1373
1374 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1375 pf->hw.mac.addr, ETH_P_LLDP, 0,
1376 main_vsi->seid, 0, false, NULL,
1377 NULL);
1378 if (ret) {
1379 dev_info(&pf->pdev->dev,
1380 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
1381 __func__, pf->hw.aq.asq_last_status);
1382 /* Continue and start FW LLDP anyways */
1383 }
1384
1385 ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
1386 if (ret) {
1387 dev_info(&pf->pdev->dev,
1388 "Start LLDP AQ command failed =0x%x\n",
1389 pf->hw.aq.asq_last_status);
1390 goto command_write_done;
1391 }
1392 #ifdef CONFIG_I40E_DCB
1393 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
1394 DCB_CAP_DCBX_VER_IEEE;
1395 #endif /* CONFIG_I40E_DCB */
1396 } else if (strncmp(&cmd_buf[5],
1397 "get local", 9) == 0) {
1398 u16 llen, rlen;
1399 int ret;
1400 u8 *buff;
1401
1402 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1403 if (!buff)
1404 goto command_write_done;
1405
1406 ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
1407 I40E_AQ_LLDP_MIB_LOCAL,
1408 buff, I40E_LLDPDU_SIZE,
1409 &llen, &rlen, NULL);
1410 if (ret) {
1411 dev_info(&pf->pdev->dev,
1412 "Get LLDP MIB (local) AQ command failed =0x%x\n",
1413 pf->hw.aq.asq_last_status);
1414 kfree(buff);
1415 buff = NULL;
1416 goto command_write_done;
1417 }
1418 dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
1419 print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
1420 DUMP_PREFIX_OFFSET, 16, 1,
1421 buff, I40E_LLDPDU_SIZE, true);
1422 kfree(buff);
1423 buff = NULL;
1424 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1425 u16 llen, rlen;
1426 int ret;
1427 u8 *buff;
1428
1429 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1430 if (!buff)
1431 goto command_write_done;
1432
1433 ret = i40e_aq_get_lldp_mib(&pf->hw,
1434 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
1435 I40E_AQ_LLDP_MIB_REMOTE,
1436 buff, I40E_LLDPDU_SIZE,
1437 &llen, &rlen, NULL);
1438 if (ret) {
1439 dev_info(&pf->pdev->dev,
1440 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
1441 pf->hw.aq.asq_last_status);
1442 kfree(buff);
1443 buff = NULL;
1444 goto command_write_done;
1445 }
1446 dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
1447 print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
1448 DUMP_PREFIX_OFFSET, 16, 1,
1449 buff, I40E_LLDPDU_SIZE, true);
1450 kfree(buff);
1451 buff = NULL;
1452 } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
1453 int ret;
1454
1455 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1456 true, NULL);
1457 if (ret) {
1458 dev_info(&pf->pdev->dev,
1459 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
1460 pf->hw.aq.asq_last_status);
1461 goto command_write_done;
1462 }
1463 } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
1464 int ret;
1465
1466 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1467 false, NULL);
1468 if (ret) {
1469 dev_info(&pf->pdev->dev,
1470 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
1471 pf->hw.aq.asq_last_status);
1472 goto command_write_done;
1473 }
1474 }
1475 } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
1476 u16 buffer_len, bytes;
1477 u16 module;
1478 u32 offset;
1479 u16 *buff;
1480 int ret;
1481
1482 cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
1483 &module, &offset, &buffer_len);
1484 if (cnt == 0) {
1485 module = 0;
1486 offset = 0;
1487 buffer_len = 0;
1488 } else if (cnt == 1) {
1489 offset = 0;
1490 buffer_len = 0;
1491 } else if (cnt == 2) {
1492 buffer_len = 0;
1493 } else if (cnt > 3) {
1494 dev_info(&pf->pdev->dev,
1495 "nvm read: bad command string, cnt=%d\n", cnt);
1496 goto command_write_done;
1497 }
1498
1499 /* set the max length */
1500 buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
1501
1502 bytes = 2 * buffer_len;
1503
1504 /* read at least 1k bytes, no more than 4kB */
1505 bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
1506 buff = kzalloc(bytes, GFP_KERNEL);
1507 if (!buff)
1508 goto command_write_done;
1509
1510 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
1511 if (ret) {
1512 dev_info(&pf->pdev->dev,
1513 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
1514 ret, pf->hw.aq.asq_last_status);
1515 kfree(buff);
1516 goto command_write_done;
1517 }
1518
1519 ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
1520 bytes, (u8 *)buff, true, NULL);
1521 i40e_release_nvm(&pf->hw);
1522 if (ret) {
1523 dev_info(&pf->pdev->dev,
1524 "Read NVM AQ failed err=%d status=0x%x\n",
1525 ret, pf->hw.aq.asq_last_status);
1526 } else {
1527 dev_info(&pf->pdev->dev,
1528 "Read NVM module=0x%x offset=0x%x words=%d\n",
1529 module, offset, buffer_len);
1530 if (bytes)
1531 print_hex_dump(KERN_INFO, "NVM Dump: ",
1532 DUMP_PREFIX_OFFSET, 16, 2,
1533 buff, bytes, true);
1534 }
1535 kfree(buff);
1536 buff = NULL;
1537 } else {
1538 dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
1539 dev_info(&pf->pdev->dev, "available commands\n");
1540 dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
1541 dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
1542 dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
1543 dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
1544 dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
1545 dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
1546 dev_info(&pf->pdev->dev, " dump switch\n");
1547 dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
1548 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1549 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1550 dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1551 dev_info(&pf->pdev->dev, " dump desc aq\n");
1552 dev_info(&pf->pdev->dev, " dump reset stats\n");
1553 dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
1554 dev_info(&pf->pdev->dev, " read <reg>\n");
1555 dev_info(&pf->pdev->dev, " write <reg> <value>\n");
1556 dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
1557 dev_info(&pf->pdev->dev, " clear_stats port\n");
1558 dev_info(&pf->pdev->dev, " pfr\n");
1559 dev_info(&pf->pdev->dev, " corer\n");
1560 dev_info(&pf->pdev->dev, " globr\n");
1561 dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
1562 dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
		dev_info(&pf->pdev->dev, " fd current cnt\n");
1564 dev_info(&pf->pdev->dev, " lldp start\n");
1565 dev_info(&pf->pdev->dev, " lldp stop\n");
1566 dev_info(&pf->pdev->dev, " lldp get local\n");
1567 dev_info(&pf->pdev->dev, " lldp get remote\n");
1568 dev_info(&pf->pdev->dev, " lldp event on\n");
1569 dev_info(&pf->pdev->dev, " lldp event off\n");
1570 dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
1571 }
1572
1573 command_write_done:
1574 kfree(cmd_buf);
1575 cmd_buf = NULL;
1576 return count;
1577 }
1578
static const struct file_operations i40e_dbg_command_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i40e_dbg_command_write,
};

/**************************************************************
 * netdev_ops
 * The netdev_ops entry in debugfs is for giving the driver commands
 * to be executed from the netdev operations.
 **************************************************************/

/**
 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
					 const char __user *buffer,
					 size_t count, loff_t *ppos)
{
1602 struct i40e_pf *pf = filp->private_data;
1603 char *cmd_buf, *buf_tmp;
1604 int bytes_not_copied;
1605 struct i40e_vsi *vsi;
1606 int vsi_seid;
1607 int i, cnt;
1608
1609 /* don't allow partial writes */
1610 if (*ppos != 0)
1611 return 0;
1612
1613 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1614 if (!cmd_buf)
1615 return count;
1616 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
1617 if (bytes_not_copied) {
1618 kfree(cmd_buf);
1619 return -EFAULT;
1620 }
1621 cmd_buf[count] = '\0';
1622
1623 buf_tmp = strchr(cmd_buf, '\n');
1624 if (buf_tmp) {
1625 *buf_tmp = '\0';
1626 count = buf_tmp - cmd_buf + 1;
1627 }
1628
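	/* each command below exercises a netdev operation (or NAPI) directly for testing */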
1629 if (strncmp(cmd_buf, "change_mtu", 10) == 0) {
1630 int mtu;
1631
1632 cnt = sscanf(&cmd_buf[11], "%i %i",
1633 &vsi_seid, &mtu);
1634 if (cnt != 2) {
1635 dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
1636 goto netdev_ops_write_done;
1637 }
1638 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1639 if (!vsi) {
1640 dev_info(&pf->pdev->dev,
1641 "change_mtu: VSI %d not found\n", vsi_seid);
1642 } else if (!vsi->netdev) {
1643 dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
1644 vsi_seid);
1645 } else if (rtnl_trylock()) {
1646 vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
1647 mtu);
1648 rtnl_unlock();
1649 dev_info(&pf->pdev->dev, "change_mtu called\n");
1650 } else {
1651 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1652 }
1653
1654 } else if (strncmp(cmd_buf, "set_rx_mode", 11) == 0) {
1655 cnt = sscanf(&cmd_buf[11], "%i", &vsi_seid);
1656 if (cnt != 1) {
1657 dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
1658 goto netdev_ops_write_done;
1659 }
1660 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1661 if (!vsi) {
1662 dev_info(&pf->pdev->dev,
1663 "set_rx_mode: VSI %d not found\n", vsi_seid);
1664 } else if (!vsi->netdev) {
1665 dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
1666 vsi_seid);
1667 } else if (rtnl_trylock()) {
1668 vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
1669 rtnl_unlock();
1670 dev_info(&pf->pdev->dev, "set_rx_mode called\n");
1671 } else {
1672 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1673 }
1674
1675 } else if (strncmp(cmd_buf, "napi", 4) == 0) {
1676 cnt = sscanf(&cmd_buf[4], "%i", &vsi_seid);
1677 if (cnt != 1) {
1678 dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
1679 goto netdev_ops_write_done;
1680 }
1681 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1682 if (!vsi) {
1683 dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
1684 vsi_seid);
1685 } else if (!vsi->netdev) {
1686 dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
1687 vsi_seid);
1688 } else {
1689 for (i = 0; i < vsi->num_q_vectors; i++)
1690 napi_schedule(&vsi->q_vectors[i]->napi);
1691 dev_info(&pf->pdev->dev, "napi called\n");
1692 }
1693 } else {
1694 dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
1695 dev_info(&pf->pdev->dev, "available commands\n");
1696 dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
1697 dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
1698 dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
1699 }
1700 netdev_ops_write_done:
1701 kfree(cmd_buf);
1702 return count;
1703 }
1704
static const struct file_operations i40e_dbg_netdev_ops_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i40e_dbg_netdev_ops_write,
};

/**
 * i40e_dbg_pf_init - setup the debugfs directory for the PF
 * @pf: the PF that is starting up
 **/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
	const char *name = pci_name(pf->pdev);

	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);

	debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_command_fops);

	debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_netdev_ops_fops);
}

/**
 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
 * @pf: the PF that is stopping
 **/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
	debugfs_remove_recursive(pf->i40e_dbg_pf);
	pf->i40e_dbg_pf = NULL;
}

/**
 * i40e_dbg_init - start up debugfs for the driver
 **/
void i40e_dbg_init(void)
{
	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
	if (IS_ERR(i40e_dbg_root))
		pr_info("init of debugfs failed\n");
}

/**
 * i40e_dbg_exit - clean out the driver's debugfs entries
 **/
void i40e_dbg_exit(void)
{
	debugfs_remove_recursive(i40e_dbg_root);
	i40e_dbg_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */