// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"

struct xstats xfsstats;

static int counter_val(struct xfsstats __percpu *stats, int idx)
{
        int val = 0, cpu;

        for_each_possible_cpu(cpu)
                val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
        return val;
}

int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
        int i, j;
        int len = 0;
        uint64_t xs_xstrat_bytes = 0;
        uint64_t xs_write_bytes = 0;
        uint64_t xs_read_bytes = 0;
        uint64_t defer_relog = 0;

        static const struct xstats_entry {
                char *desc;
                int endpoint;
        } xstats[] = {
                { "extent_alloc",       xfsstats_offset(xs_abt_lookup)  },
                { "abt",                xfsstats_offset(xs_blk_mapr)    },
                { "blk_map",            xfsstats_offset(xs_bmbt_lookup) },
                { "bmbt",               xfsstats_offset(xs_dir_lookup)  },
                { "dir",                xfsstats_offset(xs_trans_sync)  },
                { "trans",              xfsstats_offset(xs_ig_attempts) },
                { "ig",                 xfsstats_offset(xs_log_writes)  },
                { "log",                xfsstats_offset(xs_try_logspace)},
                { "push_ail",           xfsstats_offset(xs_xstrat_quick)},
                { "xstrat",             xfsstats_offset(xs_write_calls) },
                { "rw",                 xfsstats_offset(xs_attr_get)    },
                { "attr",               xfsstats_offset(xs_iflush_count)},
                { "icluster",           xfsstats_offset(vn_active)      },
                { "vnodes",             xfsstats_offset(xb_get)         },
                { "buf",                xfsstats_offset(xs_abtb_2)      },
                { "abtb2",              xfsstats_offset(xs_abtc_2)      },
                { "abtc2",              xfsstats_offset(xs_bmbt_2)      },
                { "bmbt2",              xfsstats_offset(xs_ibt_2)       },
                { "ibt2",               xfsstats_offset(xs_fibt_2)      },
                { "fibt2",              xfsstats_offset(xs_rmap_2)      },
                { "rmapbt",             xfsstats_offset(xs_refcbt_2)    },
                { "refcntbt",           xfsstats_offset(xs_rmap_mem_2)  },
                { "rmapbt_mem",         xfsstats_offset(xs_rcbag_2)     },
                { "rcbagbt",            xfsstats_offset(xs_rtrmap_2)    },
                { "rtrmapbt",           xfsstats_offset(xs_rtrmap_mem_2)},
                { "rtrmapbt_mem",       xfsstats_offset(xs_rtrefcbt_2)  },
                { "rtrefcntbt",         xfsstats_offset(xs_qm_dqreclaims)},
                /* we print both series of quota information together */
                { "qm",                 xfsstats_offset(xs_xstrat_bytes)},
        };

        /* Loop over all stats groups */

        for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
                len += scnprintf(buf + len, PATH_MAX - len, "%s",
                                xstats[i].desc);
                /* inner loop does each group */
                for (; j < xstats[i].endpoint; j++)
                        len += scnprintf(buf + len, PATH_MAX - len, " %u",
                                        counter_val(stats, j));
                len += scnprintf(buf + len, PATH_MAX - len, "\n");
        }
        /* extra precision counters */
        for_each_possible_cpu(i) {
                xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
                xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
                xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
                defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;
        }

        len += scnprintf(buf + len, PATH_MAX-len, "xpc %llu %llu %llu\n",
                        xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
        len += scnprintf(buf + len, PATH_MAX-len, "defer_relog %llu\n",
                        defer_relog);
        len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
                1);
#else
                0);
#endif

        return len;
}

void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
        int c;
        uint32_t vn_active;

        xfs_notice(NULL, "Clearing xfsstats");
        for_each_possible_cpu(c) {
                preempt_disable();
                /* save vn_active, it's a universal truth! */
                vn_active = per_cpu_ptr(stats, c)->s.vn_active;
                memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
                per_cpu_ptr(stats, c)->s.vn_active = vn_active;
                preempt_enable();
        }
}

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA

#define XFSSTAT_START_XQMSTAT   xfsstats_offset(xs_qm_dqreclaims)
#define XFSSTAT_END_XQMSTAT     xfsstats_offset(xs_qm_dquot)

static int xqm_proc_show(struct seq_file *m, void *v)
{
        /* maximum; incore; ratio free to inuse; freelist; rtquota */
        seq_printf(m, "%d\t%d\t%d\t%u\t%s\n",
                   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
                   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1),
                   IS_ENABLED(CONFIG_XFS_RT) ? "rtquota" : "quota");
        return 0;
}

/* legacy quota stats interface no 2 */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
        int j;

        seq_puts(m, "qm");
        for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
                seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
        seq_putc(m, '\n');
        return 0;
}
#endif /* CONFIG_XFS_QUOTA */

int
xfs_init_procfs(void)
{
        if (!proc_mkdir("fs/xfs", NULL))
                return -ENOMEM;

        if (!proc_symlink("fs/xfs/stat", NULL,
                          "/sys/fs/xfs/stats/stats"))
                goto out;

#ifdef CONFIG_XFS_QUOTA
        if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
                goto out;
        if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
                goto out;
#endif
        return 0;

out:
        remove_proc_subtree("fs/xfs", NULL);
        return -ENOMEM;
}

void
xfs_cleanup_procfs(void)
{
        remove_proc_subtree("fs/xfs", NULL);
}
#endif /* CONFIG_PROC_FS */