// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2012 Cisco Systems, Inc.  All rights reserved.

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"

unsigned int trace_max_pages;
static int fnic_max_trace_entries;

static unsigned long fnic_trace_buf_p;
static DEFINE_SPINLOCK(fnic_trace_lock);

static fnic_trace_dbg_t fnic_trace_entries;
int fnic_tracing_enabled = 1;

/* static char *fnic_fc_ctlr_trace_buf_p; */

static int fc_trace_max_entries;
static unsigned long fnic_fc_ctlr_trace_buf_p;
static fnic_trace_dbg_t fc_trace_entries;
int fnic_fc_tracing_enabled = 1;
int fnic_fc_trace_cleared = 1;
static DEFINE_SPINLOCK(fnic_fc_trace_lock);

static const char * const fnic_role_str[] = {
	[FNIC_ROLE_FCP_INITIATOR] = "FCP_Initiator",
};

const char *fnic_role_to_str(unsigned int role)
{
	if (role >= ARRAY_SIZE(fnic_role_str) || !fnic_role_str[role])
		return "Unknown";

	return fnic_role_str[role];
}

/*
 * fnic_trace_get_buf - Get a buffer pointer for the caller to fill with trace data
 *
 * Description:
 * This routine gets the next available trace buffer entry location @wr_idx
 * from the allocated trace buffer pages and gives that memory location
 * to the caller to store the trace information.
 *
 * Return Value:
 * This routine returns a pointer to the next available trace entry
 * @fnic_buf_head for the caller to fill with trace information.
 */
fnic_trace_data_t *fnic_trace_get_buf(void)
{
	unsigned long fnic_buf_head;
	unsigned long flags;

	spin_lock_irqsave(&fnic_trace_lock, flags);

	/*
	 * Get the next available memory location for writing trace
	 * information at @wr_idx and increment @wr_idx
	 */
	fnic_buf_head =
		fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
	fnic_trace_entries.wr_idx++;

	/*
	 * If the trace buffer is full, wrap @wr_idx around to
	 * start from zero
	 */
	if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
		fnic_trace_entries.wr_idx = 0;

	/*
	 * If write index @wr_idx has caught up with read index @rd_idx,
	 * increment @rd_idx to move to the next entry in the trace buffer
	 */
	if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
		fnic_trace_entries.rd_idx++;
		if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
			fnic_trace_entries.rd_idx = 0;
	}
	spin_unlock_irqrestore(&fnic_trace_lock, flags);
	return (fnic_trace_data_t *)fnic_buf_head;
}

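/*
 * Illustrative caller sketch (not part of this file): a minimal example of
 * how a 64-bit trace point might use fnic_trace_get_buf() to record an
 * event.  The field names match those consumed by fnic_get_trace_data()
 * below (a 32-bit build would fill the .low members instead of .val); in
 * the driver this is expected to be wrapped by a trace macro, so treat the
 * helper name here as hypothetical.
 *
 *	static void example_trace_point(u32 host_no, u32 tag, u64 a, u64 b)
 *	{
 *		fnic_trace_data_t *td;
 *
 *		if (!fnic_tracing_enabled)
 *			return;
 *		td = fnic_trace_get_buf();
 *		if (!td)
 *			return;
 *		td->fnaddr.val = (u64)(unsigned long)example_trace_point;
 *		td->host_no = host_no;
 *		td->tag = tag;
 *		td->timestamp.val = (u64)jiffies;
 *		td->data[0] = a;
 *		td->data[1] = b;
 *	}
 */
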
/*
 * fnic_get_trace_data - Copy trace buffer to a memory file
 * @fnic_dbgfs_prt: pointer to debugfs trace buffer
 *
 * Description:
 * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
 * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
 * the log and process the log until the end of the buffer. Then it will gather
 * from the beginning of the log and process until the current entry @wr_idx.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
 */
int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
{
	int rd_idx;
	int wr_idx;
	int len = 0;
	unsigned long flags;
	char str[KSYM_SYMBOL_LEN];
	struct timespec64 val;
	fnic_trace_data_t *tbp;

	spin_lock_irqsave(&fnic_trace_lock, flags);
	rd_idx = fnic_trace_entries.rd_idx;
	wr_idx = fnic_trace_entries.wr_idx;
	if (wr_idx < rd_idx) {
		while (1) {
			/* Start from read index @rd_idx */
			tbp = (fnic_trace_data_t *)
				fnic_trace_entries.page_offset[rd_idx];
			if (!tbp) {
				spin_unlock_irqrestore(&fnic_trace_lock, flags);
				return 0;
			}
			/* Convert function pointer to function name */
			if (sizeof(unsigned long) < 8) {
				sprint_symbol(str, tbp->fnaddr.low);
				jiffies_to_timespec64(tbp->timestamp.low, &val);
			} else {
				sprint_symbol(str, tbp->fnaddr.val);
				jiffies_to_timespec64(tbp->timestamp.val, &val);
			}
			/*
			 * Dump trace buffer entry to memory file
			 * and increment read index @rd_idx
			 */
			len += scnprintf(fnic_dbgfs_prt->buffer + len,
				(trace_max_pages * PAGE_SIZE * 3) - len,
				"%ptSp %-50s %8x %8x %16llx %16llx %16llx %16llx %16llx\n",
				&val, str, tbp->host_no, tbp->tag,
				tbp->data[0], tbp->data[1], tbp->data[2],
				tbp->data[3], tbp->data[4]);
			rd_idx++;
			/*
			 * If rd_idx has reached the last trace entry,
			 * wrap rd_idx around to zero
			 */
			if (rd_idx > (fnic_max_trace_entries - 1))
				rd_idx = 0;
			/*
			 * Continue dumping trace buffer entries into
			 * the memory file till rd_idx reaches the write index
			 */
			if (rd_idx == wr_idx)
				break;
		}
	} else if (wr_idx > rd_idx) {
		while (1) {
			/* Start from read index @rd_idx */
			tbp = (fnic_trace_data_t *)
				fnic_trace_entries.page_offset[rd_idx];
			if (!tbp) {
				spin_unlock_irqrestore(&fnic_trace_lock, flags);
				return 0;
			}
			/* Convert function pointer to function name */
			if (sizeof(unsigned long) < 8) {
				sprint_symbol(str, tbp->fnaddr.low);
				jiffies_to_timespec64(tbp->timestamp.low, &val);
			} else {
				sprint_symbol(str, tbp->fnaddr.val);
				jiffies_to_timespec64(tbp->timestamp.val, &val);
			}
			/*
			 * Dump trace buffer entry to memory file
			 * and increment read index @rd_idx
			 */
			len += scnprintf(fnic_dbgfs_prt->buffer + len,
				(trace_max_pages * PAGE_SIZE * 3) - len,
				"%ptSp %-50s %8x %8x %16llx %16llx %16llx %16llx %16llx\n",
				&val, str, tbp->host_no, tbp->tag,
				tbp->data[0], tbp->data[1], tbp->data[2],
				tbp->data[3], tbp->data[4]);
			rd_idx++;
			/*
			 * Continue dumping trace buffer entries into
			 * the memory file till rd_idx reaches the write index
			 */
			if (rd_idx == wr_idx)
				break;
		}
	}
	spin_unlock_irqrestore(&fnic_trace_lock, flags);
	return len;
}

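/*
 * Illustrative reader sketch (not part of this file): how a debugfs open
 * handler could size and fill the dump buffer.  The 3x page factor mirrors
 * the scnprintf() bound used above; the allocation calls and the
 * ->buffer_len field usage are assumptions about the debugfs glue, not a
 * copy of it.
 *
 *	fnic_dbgfs_t *prt = kzalloc(sizeof(*prt), GFP_KERNEL);
 *	if (prt) {
 *		prt->buffer = vzalloc(array3_size(3, trace_max_pages,
 *						  PAGE_SIZE));
 *		if (prt->buffer)
 *			prt->buffer_len = fnic_get_trace_data(prt);
 *	}
 */
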
/*
 * fnic_get_stats_data - Copy fnic stats buffer to a memory file
 * @debug: pointer to debugfs fnic stats buffer
 * @stats: pointer to the fnic stats structure
 *
 * Description:
 * This routine gathers the fnic stats debugfs data from the fnic_stats struct
 * and dumps it to stats_debug_info.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into
 * stats_debug_info
 */
int fnic_get_stats_data(struct stats_debug_info *debug,
			struct fnic_stats *stats)
{
	int len = 0;
	int buf_size = debug->buf_size;
	struct timespec64 val, val1, val2;
	int i = 0;

	ktime_get_real_ts64(&val);
	len = scnprintf(debug->debug_buffer + len, buf_size - len,
		"------------------------------------------\n"
		"\t\tTime\n"
		"------------------------------------------\n");

	val1 = timespec64_sub(val, stats->stats_timestamps.last_reset_time);
	val2 = timespec64_sub(val, stats->stats_timestamps.last_read_time);
	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"Current time : [%ptSp]\n"
		"Last stats reset time: [%ptSp]\n"
		"Last stats read time: [%ptSp]\n"
		"delta since last reset: [%ptSp]\n"
		"delta since last read: [%ptSp]\n",
		&val,
		&stats->stats_timestamps.last_reset_time,
		&stats->stats_timestamps.last_read_time,
		&val1, &val2);

	stats->stats_timestamps.last_read_time = val;

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"------------------------------------------\n"
		"\t\tIO Statistics\n"
		"------------------------------------------\n");
	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
		"Number of IOs: %lld\nNumber of IO Completions: %lld\n"
		"Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
		"Number of Memory alloc Failures: %lld\n"
		"Number of IOREQ Null: %lld\n"
		"Number of SCSI cmd pointer Null: %lld\n"

		"\nIO completion times: \n"
		" < 10 ms : %lld\n"
		" 10 ms - 100 ms : %lld\n"
		" 100 ms - 500 ms : %lld\n"
		" 500 ms - 5 sec: %lld\n"
		" 5 sec - 10 sec: %lld\n"
		" 10 sec - 30 sec: %lld\n"
		" > 30 sec: %lld\n",
		(u64)atomic64_read(&stats->io_stats.active_ios),
		(u64)atomic64_read(&stats->io_stats.max_active_ios),
		(u64)atomic64_read(&stats->io_stats.num_ios),
		(u64)atomic64_read(&stats->io_stats.io_completions),
		(u64)atomic64_read(&stats->io_stats.io_failures),
		(u64)atomic64_read(&stats->io_stats.io_not_found),
		(u64)atomic64_read(&stats->io_stats.alloc_failures),
		(u64)atomic64_read(&stats->io_stats.ioreq_null),
		(u64)atomic64_read(&stats->io_stats.sc_null),
		(u64)atomic64_read(&stats->io_stats.io_btw_0_to_10_msec),
		(u64)atomic64_read(&stats->io_stats.io_btw_10_to_100_msec),
		(u64)atomic64_read(&stats->io_stats.io_btw_100_to_500_msec),
		(u64)atomic64_read(&stats->io_stats.io_btw_500_to_5000_msec),
		(u64)atomic64_read(&stats->io_stats.io_btw_5000_to_10000_msec),
		(u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
		(u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"------------------------------------------\n"
		"\t\tIO Queues and cumulative IOs\n"
		"------------------------------------------\n");

	for (i = 0; i < FNIC_MQ_MAX_QUEUES; i++) {
		len += scnprintf(debug->debug_buffer + len, buf_size - len,
			"Q:%d -> %lld\n", i,
			(u64)atomic64_read(&stats->io_stats.ios[i]));
	}

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"\nCurrent Max IO time : %lld\n",
		(u64)atomic64_read(&stats->io_stats.current_max_io_time));

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"\n------------------------------------------\n"
		"\t\tAbort Statistics\n"
		"------------------------------------------\n");

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Aborts: %lld\n"
		"Number of Abort Failures: %lld\n"
		"Number of Abort Driver Timeouts: %lld\n"
		"Number of Abort FW Timeouts: %lld\n"
		"Number of Abort IO NOT Found: %lld\n"

		"Abort issued times: \n"
		" < 6 sec : %lld\n"
		" 6 sec - 20 sec : %lld\n"
		" 20 sec - 30 sec : %lld\n"
		" 30 sec - 40 sec : %lld\n"
		" 40 sec - 50 sec : %lld\n"
		" 50 sec - 60 sec : %lld\n"
		" > 60 sec: %lld\n",

		(u64)atomic64_read(&stats->abts_stats.aborts),
		(u64)atomic64_read(&stats->abts_stats.abort_failures),
		(u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
		(u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
		(u64)atomic64_read(&stats->abts_stats.abort_io_not_found),
		(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_0_to_6_sec),
		(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_6_to_20_sec),
		(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_20_to_30_sec),
		(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_30_to_40_sec),
		(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_40_to_50_sec),
		(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec),
		(u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec));

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"\n------------------------------------------\n"
		"\t\tTerminate Statistics\n"
		"------------------------------------------\n");

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Terminates: %lld\n"
		"Maximum Terminates: %lld\n"
		"Number of Terminate Driver Timeouts: %lld\n"
		"Number of Terminate FW Timeouts: %lld\n"
		"Number of Terminate IO NOT Found: %lld\n"
		"Number of Terminate Failures: %lld\n",
		(u64)atomic64_read(&stats->term_stats.terminates),
		(u64)atomic64_read(&stats->term_stats.max_terminates),
		(u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
		(u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
		(u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
		(u64)atomic64_read(&stats->term_stats.terminate_failures));

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"\n------------------------------------------\n"
		"\t\tReset Statistics\n"
		"------------------------------------------\n");

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Device Resets: %lld\n"
		"Number of Device Reset Failures: %lld\n"
		"Number of Device Reset Aborts: %lld\n"
		"Number of Device Reset Timeouts: %lld\n"
		"Number of Device Reset Terminates: %lld\n"
		"Number of FW Resets: %lld\n"
		"Number of FW Reset Completions: %lld\n"
		"Number of FW Reset Failures: %lld\n"
		"Number of Fnic Reset: %lld\n"
		"Number of Fnic Reset Completions: %lld\n"
		"Number of Fnic Reset Failures: %lld\n",
		(u64)atomic64_read(&stats->reset_stats.device_resets),
		(u64)atomic64_read(&stats->reset_stats.device_reset_failures),
		(u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
		(u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
		(u64)atomic64_read(
			&stats->reset_stats.device_reset_terminates),
		(u64)atomic64_read(&stats->reset_stats.fw_resets),
		(u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
		(u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
		(u64)atomic64_read(&stats->reset_stats.fnic_resets),
		(u64)atomic64_read(
			&stats->reset_stats.fnic_reset_completions),
		(u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"\n------------------------------------------\n"
		"\t\tFirmware Statistics\n"
		"------------------------------------------\n");

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Active FW Requests %lld\n"
		"Maximum FW Requests: %lld\n"
		"Number of FW out of resources: %lld\n"
		"Number of FW IO errors: %lld\n",
		(u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
		(u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
		(u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
		(u64)atomic64_read(&stats->fw_stats.io_fw_errs));

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"\n------------------------------------------\n"
		"\t\tVlan Discovery Statistics\n"
		"------------------------------------------\n");

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Vlan Discovery Requests Sent %lld\n"
		"Vlan Response Received with no FCF VLAN ID: %lld\n"
		"No solicitations recvd after vlan set, expiry count: %lld\n"
		"Flogi rejects count: %lld\n",
		(u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
		(u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
		(u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
		(u64)atomic64_read(&stats->vlan_stats.flogi_rejects));

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"\n------------------------------------------\n"
		"\t\tOther Important Statistics\n"
		"------------------------------------------\n");

	jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
	jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"Last ISR time: %llu (%ptSp)\n"
		"Last ACK time: %llu (%ptSp)\n"
		"Max ISR jiffies: %llu\n"
		"Max ISR time (ms) (0 denotes < 1 ms): %llu\n"
		"Corr. work done: %llu\n"
		"Number of ISRs: %lld\n"
		"Maximum CQ Entries: %lld\n"
		"Number of ACK index out of range: %lld\n"
		"Number of data count mismatch: %lld\n"
		"Number of FCPIO Timeouts: %lld\n"
		"Number of FCPIO Aborted: %lld\n"
		"Number of SGL Invalid: %lld\n"
		"Number of Copy WQ Alloc Failures for ABTs: %lld\n"
		"Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
		"Number of Copy WQ Alloc Failures for IOs: %lld\n"
		"Number of no icmnd itmf Completions: %lld\n"
		"Number of Check Conditions encountered: %lld\n"
		"Number of QUEUE Fulls: %lld\n"
		"Number of rport not ready: %lld\n"
		"Number of receive frame errors: %lld\n"
		"Port speed (in Mbps): %lld\n",
		(u64)stats->misc_stats.last_isr_time, &val1,
		(u64)stats->misc_stats.last_ack_time, &val2,
		(u64)atomic64_read(&stats->misc_stats.max_isr_jiffies),
		(u64)atomic64_read(&stats->misc_stats.max_isr_time_ms),
		(u64)atomic64_read(&stats->misc_stats.corr_work_done),
		(u64)atomic64_read(&stats->misc_stats.isr_count),
		(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
		(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
		(u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
		(u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
		(u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
		(u64)atomic64_read(&stats->misc_stats.sgl_invalid),
		(u64)atomic64_read(
			&stats->misc_stats.abts_cpwq_alloc_failures),
		(u64)atomic64_read(
			&stats->misc_stats.devrst_cpwq_alloc_failures),
		(u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
		(u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
		(u64)atomic64_read(&stats->misc_stats.check_condition),
		(u64)atomic64_read(&stats->misc_stats.queue_fulls),
		(u64)atomic64_read(&stats->misc_stats.tport_not_ready),
		(u64)atomic64_read(&stats->misc_stats.frame_errors),
		(u64)atomic64_read(&stats->misc_stats.port_speed_in_mbps));

	return len;
}

int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic)
{
	struct fnic_iport_s *iport = &fnic->iport;
	int buf_size = info->buf_size;
	int len = info->buffer_len;
	struct fnic_tport_s *tport, *next;
	unsigned long flags;

	len += snprintf(info->debug_buffer + len, buf_size - len,
		"------------------------------------------\n"
		"\t\t Debug Info\n"
		"------------------------------------------\n");
	len += snprintf(info->debug_buffer + len, buf_size - len,
		"fnic Name:%s number:%d Role:%s State:%s\n",
		fnic->name, fnic->fnic_num,
		fnic_role_to_str(fnic->role),
		fnic_state_to_str(fnic->state));
	len +=
	    snprintf(info->debug_buffer + len, buf_size - len,
		     "iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n",
		     iport->state, iport->flags, iport->vlan_id, iport->fcid);
	len +=
	    snprintf(info->debug_buffer + len, buf_size - len,
		     "usefip:%d fip_state:%d fip_flogi_retry:%d\n",
		     iport->usefip, iport->fip.state, iport->fip.flogi_retry);
	len +=
	    snprintf(info->debug_buffer + len, buf_size - len,
		     "fpma %02x:%02x:%02x:%02x:%02x:%02x",
		     iport->fpma[5], iport->fpma[4], iport->fpma[3],
		     iport->fpma[2], iport->fpma[1], iport->fpma[0]);
	len +=
	    snprintf(info->debug_buffer + len, buf_size - len,
		     "fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n",
		     iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3],
		     iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]);
	len +=
	    snprintf(info->debug_buffer + len, buf_size - len,
		     "fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n",
		     iport->fabric.state, iport->fabric.flags,
		     iport->fabric.retry_counter, iport->e_d_tov,
		     iport->r_a_tov);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
		len += snprintf(info->debug_buffer + len, buf_size - len,
			"tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n",
			tport->fcid, tport->state, tport->flags,
			atomic_read(&tport->in_flight),
			tport->retry_counter);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	return len;
}

/*
 * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
 *
 * Description:
 * Initialize trace buffer data structure by allocating required memory and
 * setting page_offset information for every trace entry by adding trace entry
 * length to previous page_offset value.
 */
int fnic_trace_buf_init(void)
{
	unsigned long fnic_buf_head;
	int i;
	int err = 0;

	trace_max_pages = fnic_trace_max_pages;
	fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE) /
					FNIC_ENTRY_SIZE_BYTES;

	fnic_trace_buf_p = (unsigned long)vcalloc(trace_max_pages, PAGE_SIZE);
	if (!fnic_trace_buf_p) {
		printk(KERN_ERR PFX "Failed to allocate memory "
				    "for fnic_trace_buf_p\n");
		err = -ENOMEM;
		goto err_fnic_trace_buf_init;
	}

	fnic_trace_entries.page_offset =
		vcalloc(fnic_max_trace_entries, sizeof(unsigned long));
	if (!fnic_trace_entries.page_offset) {
		printk(KERN_ERR PFX "Failed to allocate memory for"
				    " page_offset\n");
		if (fnic_trace_buf_p) {
			vfree((void *)fnic_trace_buf_p);
			fnic_trace_buf_p = 0;
		}
		err = -ENOMEM;
		goto err_fnic_trace_buf_init;
	}
	fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
	fnic_buf_head = fnic_trace_buf_p;

	/*
	 * Set page_offset field of fnic_trace_entries struct by
	 * calculating memory location for every trace entry using
	 * length of each trace entry
	 */
	for (i = 0; i < fnic_max_trace_entries; i++) {
		fnic_trace_entries.page_offset[i] = fnic_buf_head;
		fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
	}
	fnic_trace_debugfs_init();
	pr_info("fnic: Successfully Initialized Trace Buffer\n");
	return err;

err_fnic_trace_buf_init:
	return err;
}

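/*
 * Layout sketch for reference: after fnic_trace_buf_init() the address of
 * entry i is a fixed stride into the flat allocation, i.e.
 *
 *	fnic_trace_entries.page_offset[i] ==
 *		fnic_trace_buf_p + i * FNIC_ENTRY_SIZE_BYTES
 *
 * so a ring index maps directly to one fnic_trace_data_t slot.
 */
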
/*
 * fnic_trace_free - Free memory of fnic trace data structures.
 */
void fnic_trace_free(void)
{
	fnic_tracing_enabled = 0;
	fnic_trace_debugfs_terminate();
	if (fnic_trace_entries.page_offset) {
		vfree((void *)fnic_trace_entries.page_offset);
		fnic_trace_entries.page_offset = NULL;
	}
	if (fnic_trace_buf_p) {
		vfree((void *)fnic_trace_buf_p);
		fnic_trace_buf_p = 0;
	}
	printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
}

/*
 * fnic_fc_trace_init -
 * Initialize trace buffer to log fnic control frames
 * Description:
 * Initialize trace buffer data structure by allocating
 * required memory for trace data as well as for indexes.
 * Frame size is 256 bytes and
 * memory is allocated for 1024 entries of 256 bytes.
 * page_offset (index) is set to the address of each trace entry,
 * initialized by adding the frame size to the previous
 * page_offset entry.
 */

int fnic_fc_trace_init(void)
{
	unsigned long fc_trace_buf_head;
	int err = 0;
	int i;

	fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE) /
				FC_TRC_SIZE_BYTES;
	fnic_fc_ctlr_trace_buf_p =
		(unsigned long)vcalloc(fnic_fc_trace_max_pages, PAGE_SIZE);
	if (!fnic_fc_ctlr_trace_buf_p) {
		pr_err("fnic: Failed to allocate memory for "
		       "FC Control Trace Buf\n");
		err = -ENOMEM;
		goto err_fnic_fc_ctlr_trace_buf_init;
	}

	/* Allocate memory for page offset */
	fc_trace_entries.page_offset =
		vcalloc(fc_trace_max_entries, sizeof(unsigned long));
	if (!fc_trace_entries.page_offset) {
		pr_err("fnic: Failed to allocate memory for page_offset\n");
		if (fnic_fc_ctlr_trace_buf_p) {
			pr_err("fnic: Freeing FC Control Trace Buf\n");
			vfree((void *)fnic_fc_ctlr_trace_buf_p);
			fnic_fc_ctlr_trace_buf_p = 0;
		}
		err = -ENOMEM;
		goto err_fnic_fc_ctlr_trace_buf_init;
	}

	fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
	fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;

	/*
	 * Set up fc_trace_entries.page_offset field with memory location
	 * for every trace entry
	 */
	for (i = 0; i < fc_trace_max_entries; i++) {
		fc_trace_entries.page_offset[i] = fc_trace_buf_head;
		fc_trace_buf_head += FC_TRC_SIZE_BYTES;
	}
	fnic_fc_trace_debugfs_init();
	pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
	return err;

err_fnic_fc_ctlr_trace_buf_init:
	return err;
}

/*
 * fnic_fc_trace_free - Free memory of fnic_fc_ctlr trace data structures.
 */
void fnic_fc_trace_free(void)
{
	fnic_fc_tracing_enabled = 0;
	fnic_fc_trace_debugfs_terminate();
	if (fc_trace_entries.page_offset) {
		vfree((void *)fc_trace_entries.page_offset);
		fc_trace_entries.page_offset = NULL;
	}
	if (fnic_fc_ctlr_trace_buf_p) {
		vfree((void *)fnic_fc_ctlr_trace_buf_p);
		fnic_fc_ctlr_trace_buf_p = 0;
	}
	pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n");
}

/*
 * fnic_fc_trace_set_data -
 * Maintain rd and wr idx accordingly and set data
 * Passed parameters:
 * host_no: host number associated with fnic
 * frame_type: send frame, receive frame or link event
 * frame: pointer to the FC frame
 * fc_trc_frame_len: length of the FC frame
 * Description:
 * This routine will get the next available wr_idx and
 * copy all passed trace data to the buffer pointed to by wr_idx,
 * then increment wr_idx. It will also make sure that we don't
 * overwrite the entry that is currently being read and will
 * wrap around if we reach the maximum number of entries.
 * Returned Value:
 * It will return 0 for success
 */
int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
			   char *frame, u32 fc_trc_frame_len)
{
	unsigned long flags;
	struct fc_trace_hdr *fc_buf;
	unsigned long eth_fcoe_hdr_len;
	char *fc_trace;

	if (fnic_fc_tracing_enabled == 0)
		return 0;

	spin_lock_irqsave(&fnic_fc_trace_lock, flags);

	if (fnic_fc_trace_cleared == 1) {
		fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
		pr_info("fnic: Resetting the read idx\n");
		memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
		       fnic_fc_trace_max_pages * PAGE_SIZE);
		fnic_fc_trace_cleared = 0;
	}

	fc_buf = (struct fc_trace_hdr *)
		fc_trace_entries.page_offset[fc_trace_entries.wr_idx];

	fc_trace_entries.wr_idx++;

	if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
		fc_trace_entries.wr_idx = 0;

	if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
		fc_trace_entries.rd_idx++;
		if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
			fc_trace_entries.rd_idx = 0;
	}

	ktime_get_real_ts64(&fc_buf->time_stamp);
	fc_buf->host_no = host_no;
	fc_buf->frame_type = frame_type;

	fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);

	/* During the receive path, we do not have eth hdr as well as fcoe hdr
	 * at trace entry point so we will stuff 0xff just to make it generic.
	 */
	if (frame_type == FNIC_FC_RECV) {
		eth_fcoe_hdr_len = sizeof(struct ethhdr) +
					sizeof(struct fcoe_hdr);
		memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
		/* Copy the rest of data frame */
		memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
		       min_t(u8, fc_trc_frame_len,
			     (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE
						- eth_fcoe_hdr_len)));
	} else {
		memcpy((char *)fc_trace, (void *)frame,
		       min_t(u8, fc_trc_frame_len,
			     (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
	}

	/* Store the actual received length */
	fc_buf->frame_len = fc_trc_frame_len;

	spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
	return 0;
}

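/*
 * Illustrative caller sketch (not part of this file): recording an incoming
 * frame before it is processed.  FNIC_FC_RECV is the frame type handled by
 * the receive path above; the host_no, frame and frame_len variables are
 * assumptions about the call site.
 *
 *	if (fnic_fc_tracing_enabled)
 *		fnic_fc_trace_set_data(host_no, FNIC_FC_RECV,
 *				       (char *)frame, frame_len);
 */
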
/*
 * fnic_fc_trace_get_data - Copy trace buffer to a memory file
 * Passed parameters:
 * @fnic_dbgfs_prt: pointer to debugfs trace buffer
 * rdata_flag: 1 => unformatted file
 *             0 => formatted file
 * Description:
 * This routine will copy the trace data to a memory file with
 * proper formatting, or to another memory file without
 * formatting for further processing.
 * Return Value:
 * Number of bytes that were dumped into fnic_dbgfs_t
 */

int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
{
	int rd_idx, wr_idx;
	unsigned long flags;
	int len = 0, j;
	struct fc_trace_hdr *tdata;
	char *fc_trace;

	spin_lock_irqsave(&fnic_fc_trace_lock, flags);
	if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
		spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
		pr_info("fnic: Buffer is empty\n");
		return 0;
	}
	rd_idx = fc_trace_entries.rd_idx;
	wr_idx = fc_trace_entries.wr_idx;
	if (rdata_flag == 0) {
		len += scnprintf(fnic_dbgfs_prt->buffer + len,
			(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
			"Time Stamp (UTC)\t\t"
			"Host No: F Type: len: FCoE_FRAME:\n");
	}

	while (rd_idx != wr_idx) {
		tdata = (struct fc_trace_hdr *)
			fc_trace_entries.page_offset[rd_idx];
		if (!tdata) {
			pr_info("fnic: Rd data is NULL\n");
			spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
			return 0;
		}
		if (rdata_flag == 0) {
			copy_and_format_trace_data(tdata,
					fnic_dbgfs_prt, &len, rdata_flag);
		} else {
			fc_trace = (char *)tdata;
			for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
				len += scnprintf(fnic_dbgfs_prt->buffer + len,
					(fnic_fc_trace_max_pages * PAGE_SIZE * 3)
					- len, "%02x", fc_trace[j] & 0xff);
			} /* for loop */
			len += scnprintf(fnic_dbgfs_prt->buffer + len,
				(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
				"\n");
		}
		rd_idx++;
		if (rd_idx > (fc_trace_max_entries - 1))
			rd_idx = 0;
	}

	spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
	return len;
}

/*
 * copy_and_format_trace_data - Copy formatted data to char * buffer
 * Passed parameters:
 * @tdata: pointer to trace data
 * @fnic_dbgfs_prt: pointer to debugfs trace buffer
 * @orig_len: pointer to len
 * rdata_flag: 0 => formatted file, 1 => unformatted file
 * Description:
 * This routine will format and copy the passed trace data
 * for the formatted file or the unformatted file accordingly.
 */

void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
				fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
				u8 rdata_flag)
{
	int j, i = 1, len;
	int ethhdr_len = sizeof(struct ethhdr) - 1;
	int fcoehdr_len = sizeof(struct fcoe_hdr);
	int fchdr_len = sizeof(struct fc_frame_header);
	int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;
	char *fc_trace;

	tdata->frame_type = tdata->frame_type & 0x7F;

	len = *orig_len;

	len += scnprintf(fnic_dbgfs_prt->buffer + len, max_size - len,
			 "%ptSs ns%8x %c%8x\t",
			 &tdata->time_stamp,
			 tdata->host_no, tdata->frame_type, tdata->frame_len);

	fc_trace = (char *)FC_TRACE_ADDRESS(tdata);

	for (j = 0; j < min_t(u8, tdata->frame_len,
		(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
		if (tdata->frame_type == FNIC_FC_LE) {
			len += scnprintf(fnic_dbgfs_prt->buffer + len,
					 max_size - len, "%c", fc_trace[j]);
		} else {
			len += scnprintf(fnic_dbgfs_prt->buffer + len,
					 max_size - len, "%02x",
					 fc_trace[j] & 0xff);
			len += scnprintf(fnic_dbgfs_prt->buffer + len,
					 max_size - len, " ");
			if (j == ethhdr_len ||
			    j == ethhdr_len + fcoehdr_len ||
			    j == ethhdr_len + fcoehdr_len + fchdr_len ||
			    (i > 3 && j % fchdr_len == 0)) {
				len += scnprintf(fnic_dbgfs_prt->buffer
						 + len, max_size - len,
						 "\n\t\t\t\t\t\t\t\t");
				i++;
			}
		} /* end of else */
	} /* end of for loop */
	len += scnprintf(fnic_dbgfs_prt->buffer + len,
			 max_size - len, "\n");
	*orig_len = len;
}
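
/*
 * Illustrative reader sketch (not part of this file): the same FC trace ring
 * can be dumped either formatted or as raw hex by flipping rdata_flag, which
 * is how two separate debugfs files are expected to use it.  The prt buffer
 * sizing follows the scnprintf() bound above (3 * fnic_fc_trace_max_pages
 * pages); the surrounding debugfs glue is an assumption.
 *
 *	fmt_len = fnic_fc_trace_get_data(prt, 0);	// human-readable dump
 *	raw_len = fnic_fc_trace_get_data(prt, 1);	// raw hex dump
 */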