// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include "rtrs-clt.h"

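/**
 * rtrs_clt_update_wc_stats() - account a work completion and CPU migrations
 * @con: client connection the completion was processed for
 *
 * If the completion runs on a CPU other than the one the connection is
 * assigned to, bump the current CPU's "to" counter and the connection
 * CPU's "from" counter.  The "from" counter is atomic because it is
 * incremented from a foreign CPU.
 */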
void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rtrs_clt_stats *stats = clt_path->stats;
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	cpu = raw_smp_processor_id();
	s = get_cpu_ptr(stats->pcpu_stats);
	if (con->cpu != cpu) {
		s->cpu_migr.to++;

		/* Careful here: s now points at the connection CPU's counters */
		s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
		atomic_inc(&s->cpu_migr.from);
	}
	put_cpu_ptr(stats->pcpu_stats);
}

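/**
 * rtrs_clt_inc_failover_cnt() - account one failed-over RDMA request
 * @stats: client statistics to update
 */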
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
	this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt);
}

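/**
 * rtrs_clt_stats_migration_from_cnt_to_str() - print "migrated from" counters
 * @stats: client statistics to read
 * @buf: sysfs buffer to print into
 *
 * Emits one space-separated counter per possible CPU, terminated by a
 * newline.  Returns the number of bytes written.
 */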
int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = 0;
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, "%d ",
				      atomic_read(&s->cpu_migr.from));
	}

	used += sysfs_emit_at(buf, used, "\n");

	return used;
}

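/**
 * rtrs_clt_stats_migration_to_cnt_to_str() - print "migrated to" counters
 * @stats: client statistics to read
 * @buf: sysfs buffer to print into
 *
 * Same format as rtrs_clt_stats_migration_from_cnt_to_str(): one counter
 * per possible CPU, terminated by a newline.  Returns the number of
 * bytes written.
 */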
int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = 0;
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to);
	}

	used += sysfs_emit_at(buf, used, "\n");

	return used;
}

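/**
 * rtrs_clt_stats_reconnects_to_str() - print reconnect counters
 * @stats: client statistics to read
 * @buf: sysfs buffer to print into
 *
 * Emits "<successful> <failed>\n".  Returns the number of bytes written.
 */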
int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
			  stats->reconnects.fail_cnt);
}

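/**
 * rtrs_clt_stats_rdma_to_str() - print accumulated RDMA statistics
 * @stats: client statistics to read
 * @page: sysfs buffer to print into
 *
 * Sums the per-CPU counters and emits them as
 * "<read cnt> <read bytes> <write cnt> <write bytes> <inflight> <failovers>\n".
 * Returns the number of bytes written.
 */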
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
{
	struct rtrs_clt_stats_rdma sum;
	struct rtrs_clt_stats_rdma *r;
	int cpu;

	memset(&sum, 0, sizeof(sum));

	for_each_possible_cpu(cpu) {
		r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;

		sum.dir[READ].cnt	  += r->dir[READ].cnt;
		sum.dir[READ].size_total  += r->dir[READ].size_total;
		sum.dir[WRITE].cnt	  += r->dir[WRITE].cnt;
		sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
		sum.failover_cnt	  += r->failover_cnt;
	}

	return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
			  sum.dir[READ].cnt, sum.dir[READ].size_total,
			  sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
			  atomic_read(&stats->inflight), sum.failover_cnt);
}

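/**
 * rtrs_clt_reset_all_help() - print the usage hint for the reset attribute
 * @s: client statistics (unused)
 * @page: sysfs buffer to print into
 */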
ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
{
	return sysfs_emit(page, "echo 1 to reset all statistics\n");
}

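/**
 * rtrs_clt_reset_rdma_stats() - zero the per-CPU RDMA counters
 * @stats: client statistics to reset
 * @enable: must be true, a reset cannot be requested with a zero value
 *
 * Returns 0 on success, -EINVAL if @enable is false.
 */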
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->rdma, 0, sizeof(s->rdma));
	}

	return 0;
}

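/**
 * rtrs_clt_reset_cpu_migr_stats() - zero the per-CPU migration counters
 * @stats: client statistics to reset
 * @enable: must be true, a reset cannot be requested with a zero value
 *
 * Returns 0 on success, -EINVAL if @enable is false.
 */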
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
	}

	return 0;
}

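/**
 * rtrs_clt_reset_reconnects_stat() - zero the reconnect counters
 * @stats: client statistics to reset
 * @enable: must be true, a reset cannot be requested with a zero value
 *
 * Returns 0 on success, -EINVAL if @enable is false.
 */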
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
{
	if (!enable)
		return -EINVAL;

	memset(&stats->reconnects, 0, sizeof(stats->reconnects));

	return 0;
}

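/**
 * rtrs_clt_reset_all_stats() - reset every statistic, including inflight
 * @s: client statistics to reset
 * @enable: must be true, a reset cannot be requested with a zero value
 *
 * Returns 0 on success, -EINVAL if @enable is false.
 */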
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
{
	if (enable) {
		rtrs_clt_reset_rdma_stats(s, enable);
		rtrs_clt_reset_cpu_migr_stats(s, enable);
		rtrs_clt_reset_reconnects_stat(s, enable);
		atomic_set(&s->inflight, 0);
		return 0;
	}

	return -EINVAL;
}

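/*
 * Account one RDMA request of @size bytes in direction @d (READ or WRITE)
 * in the per-CPU counters.
 */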
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
					      size_t size, int d)
{
	this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
	this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size);
}

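/**
 * rtrs_clt_update_all_stats() - account an I/O request being submitted
 * @req: request to account
 * @dir: READ or WRITE
 *
 * Adds the combined user and data payload length to the RDMA counters.
 * The inflight counter is only maintained for the min-inflight multipath
 * policy, which selects the path with the fewest requests in flight.
 */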
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rtrs_clt_stats *stats = clt_path->stats;
	unsigned int len;

	len = req->usr_len + req->data_len;
	rtrs_clt_update_rdma_stats(stats, len, dir);
	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_inc(&stats->inflight);
}

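/**
 * rtrs_clt_init_stats() - allocate the per-CPU statistics
 * @stats: client statistics to initialize
 *
 * Returns 0 on success, -ENOMEM if the per-CPU allocation fails.
 */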
int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
{
	stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
	if (!stats->pcpu_stats)
		return -ENOMEM;

	/*
	 * successful_cnt will be set to 0 after the session
	 * is established for the first time
	 */
	stats->reconnects.successful_cnt = -1;

	return 0;
}
199