/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/ceph/ceph_debug.h>

#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/math64.h>

#include "metric.h"
#include "mds_client.h"

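/* Convert a ktime_t latency value into the on-wire struct ceph_timespec. */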
static void ktime_to_ceph_timespec(struct ceph_timespec *ts, ktime_t val)
{
	struct timespec64 t = ktime_to_timespec64(val);
	ceph_encode_timespec64(ts, &t);
}

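/*
 * Build a single CEPH_MSG_CLIENT_METRICS message containing every metric
 * record (caps, latencies, dentry leases, opened files/inodes, pinned
 * caps and I/O sizes) and send it to the given MDS session.  Returns
 * false if the MDS rank is not active yet or the message allocation fails.
 */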
static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *s)
{
	struct ceph_metric_head *head;
	struct ceph_metric_cap *cap;
	struct ceph_metric_read_latency *read;
	struct ceph_metric_write_latency *write;
	struct ceph_metric_metadata_latency *meta;
	struct ceph_metric_dlease *dlease;
	struct ceph_opened_files *files;
	struct ceph_pinned_icaps *icaps;
	struct ceph_opened_inodes *inodes;
	struct ceph_read_io_size *rsize;
	struct ceph_write_io_size *wsize;
	struct ceph_client_metric *m = &mdsc->metric;
	u64 nr_caps = atomic64_read(&m->total_caps);
	u32 header_len = sizeof(struct ceph_metric_header);
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_msg *msg;
	s64 sum;
	s32 items = 0;
	s32 len;

	/* Do not send the metrics until the MDS rank is ready */
	mutex_lock(&mdsc->mutex);
	if (ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) != CEPH_MDS_STATE_ACTIVE) {
		mutex_unlock(&mdsc->mutex);
		return false;
	}
	mutex_unlock(&mdsc->mutex);

	len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
	      + sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
	      + sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
	      + sizeof(*wsize);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
	if (!msg) {
		pr_err_client(cl, "to mds%d, failed to allocate message\n",
			      s->s_mds);
		return false;
	}

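	/*
	 * The metric records are packed back-to-back in the message front
	 * buffer, starting right after the head; each cast below simply
	 * steps past the previous record.
	 */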
	head = msg->front.iov_base;

	/* encode the cap metric */
	cap = (struct ceph_metric_cap *)(head + 1);
	cap->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
	cap->header.ver = 1;
	cap->header.compat = 1;
	cap->header.data_len = cpu_to_le32(sizeof(*cap) - header_len);
	cap->hit = cpu_to_le64(percpu_counter_sum(&m->i_caps_hit));
	cap->mis = cpu_to_le64(percpu_counter_sum(&m->i_caps_mis));
	cap->total = cpu_to_le64(nr_caps);
	items++;

	/* encode the read latency metric */
	read = (struct ceph_metric_read_latency *)(cap + 1);
	read->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
	read->header.ver = 2;
	read->header.compat = 1;
	read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
	sum = m->metric[METRIC_READ].latency_sum;
	ktime_to_ceph_timespec(&read->lat, sum);
	ktime_to_ceph_timespec(&read->avg, m->metric[METRIC_READ].latency_avg);
	read->sq_sum = cpu_to_le64(m->metric[METRIC_READ].latency_sq_sum);
	read->count = cpu_to_le64(m->metric[METRIC_READ].total);
	items++;

	/* encode the write latency metric */
	write = (struct ceph_metric_write_latency *)(read + 1);
	write->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
	write->header.ver = 2;
	write->header.compat = 1;
	write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
	sum = m->metric[METRIC_WRITE].latency_sum;
	ktime_to_ceph_timespec(&write->lat, sum);
	ktime_to_ceph_timespec(&write->avg, m->metric[METRIC_WRITE].latency_avg);
	write->sq_sum = cpu_to_le64(m->metric[METRIC_WRITE].latency_sq_sum);
	write->count = cpu_to_le64(m->metric[METRIC_WRITE].total);
	items++;

	/* encode the metadata latency metric */
	meta = (struct ceph_metric_metadata_latency *)(write + 1);
	meta->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
	meta->header.ver = 2;
	meta->header.compat = 1;
	meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
	sum = m->metric[METRIC_METADATA].latency_sum;
	ktime_to_ceph_timespec(&meta->lat, sum);
	ktime_to_ceph_timespec(&meta->avg, m->metric[METRIC_METADATA].latency_avg);
	meta->sq_sum = cpu_to_le64(m->metric[METRIC_METADATA].latency_sq_sum);
	meta->count = cpu_to_le64(m->metric[METRIC_METADATA].total);
	items++;

	/* encode the dentry lease metric */
	dlease = (struct ceph_metric_dlease *)(meta + 1);
	dlease->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_DENTRY_LEASE);
	dlease->header.ver = 1;
	dlease->header.compat = 1;
	dlease->header.data_len = cpu_to_le32(sizeof(*dlease) - header_len);
	dlease->hit = cpu_to_le64(percpu_counter_sum(&m->d_lease_hit));
	dlease->mis = cpu_to_le64(percpu_counter_sum(&m->d_lease_mis));
	dlease->total = cpu_to_le64(atomic64_read(&m->total_dentries));
	items++;

	sum = percpu_counter_sum(&m->total_inodes);

	/* encode the opened files metric */
	files = (struct ceph_opened_files *)(dlease + 1);
	files->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_FILES);
	files->header.ver = 1;
	files->header.compat = 1;
	files->header.data_len = cpu_to_le32(sizeof(*files) - header_len);
	files->opened_files = cpu_to_le64(atomic64_read(&m->opened_files));
	files->total = cpu_to_le64(sum);
	items++;

	/* encode the pinned icaps metric */
	icaps = (struct ceph_pinned_icaps *)(files + 1);
	icaps->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_PINNED_ICAPS);
	icaps->header.ver = 1;
	icaps->header.compat = 1;
	icaps->header.data_len = cpu_to_le32(sizeof(*icaps) - header_len);
	icaps->pinned_icaps = cpu_to_le64(nr_caps);
	icaps->total = cpu_to_le64(sum);
	items++;

	/* encode the opened inodes metric */
	inodes = (struct ceph_opened_inodes *)(icaps + 1);
	inodes->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_INODES);
	inodes->header.ver = 1;
	inodes->header.compat = 1;
	inodes->header.data_len = cpu_to_le32(sizeof(*inodes) - header_len);
	inodes->opened_inodes = cpu_to_le64(percpu_counter_sum(&m->opened_inodes));
	inodes->total = cpu_to_le64(sum);
	items++;

	/* encode the read io size metric */
	rsize = (struct ceph_read_io_size *)(inodes + 1);
	rsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_IO_SIZES);
	rsize->header.ver = 1;
	rsize->header.compat = 1;
	rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
	rsize->total_ops = cpu_to_le64(m->metric[METRIC_READ].total);
	rsize->total_size = cpu_to_le64(m->metric[METRIC_READ].size_sum);
	items++;

	/* encode the write io size metric */
	wsize = (struct ceph_write_io_size *)(rsize + 1);
	wsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_IO_SIZES);
	wsize->header.ver = 1;
	wsize->header.compat = 1;
	wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
	wsize->total_ops = cpu_to_le64(m->metric[METRIC_WRITE].total);
	wsize->total_size = cpu_to_le64(m->metric[METRIC_WRITE].size_sum);
	items++;

	put_unaligned_le32(items, &head->num);
	msg->front.iov_len = len;
	msg->hdr.version = cpu_to_le16(1);
	msg->hdr.compat_version = cpu_to_le16(1);
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_con_send(&s->s_con, msg);

	return true;
}


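/*
 * Pick the first session whose state is valid and whose MDS advertises
 * CEPHFS_FEATURE_METRIC_COLLECT, and cache it (with its reference held)
 * in mdsc->metric.session for the delayed work to use.
 */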
static void metric_get_session(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *s;
	int i;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;

		/*
		 * Skip sessions whose MDS doesn't support metric collection;
		 * otherwise the MDS would close the session's socket
		 * connection as soon as it receives this message.
		 */
		if (check_session_state(s) &&
		    test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
			mdsc->metric.session = s;
			break;
		}

		ceph_put_mds_session(s);
	}
	mutex_unlock(&mdsc->mutex);
}

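/*
 * Periodic work: make sure we still have a usable metrics session
 * (re-acquiring one if the cached session has gone bad), send the
 * current metrics to it and re-arm the delayed work.  Nothing is sent
 * or rescheduled once the client is stopping or disable_send_metrics
 * is set.
 */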
static void metric_delayed_work(struct work_struct *work)
{
	struct ceph_client_metric *m =
		container_of(work, struct ceph_client_metric, delayed_work.work);
	struct ceph_mds_client *mdsc =
		container_of(m, struct ceph_mds_client, metric);

	if (mdsc->stopping || disable_send_metrics)
		return;

	if (!m->session || !check_session_state(m->session)) {
		if (m->session) {
			ceph_put_mds_session(m->session);
			m->session = NULL;
		}
		metric_get_session(mdsc);
	}
	if (m->session) {
		ceph_mdsc_send_metrics(mdsc, m->session);
		metric_schedule_delayed(m);
	}
}

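/*
 * Initialize all counters and per-type latency/size statistics.  The
 * percpu counters are torn down in reverse order if any allocation
 * fails, so a non-zero return leaves nothing to clean up.
 */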
int ceph_metric_init(struct ceph_client_metric *m)
{
	struct ceph_metric *metric;
	int ret, i;

	if (!m)
		return -EINVAL;

	atomic64_set(&m->total_dentries, 0);
	ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_d_lease_mis;

	atomic64_set(&m->total_caps, 0);
	ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_hit;

	ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_mis;

	for (i = 0; i < METRIC_MAX; i++) {
		metric = &m->metric[i];
		spin_lock_init(&metric->lock);
		metric->size_sum = 0;
		metric->size_min = U64_MAX;
		metric->size_max = 0;
		metric->total = 0;
		metric->latency_sum = 0;
		metric->latency_avg = 0;
		metric->latency_sq_sum = 0;
		metric->latency_min = KTIME_MAX;
		metric->latency_max = 0;
	}

	atomic64_set(&m->opened_files, 0);
	ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_opened_inodes;
	ret = percpu_counter_init(&m->total_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_total_inodes;

	m->session = NULL;
	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);

	return 0;

err_total_inodes:
	percpu_counter_destroy(&m->opened_inodes);
err_opened_inodes:
	percpu_counter_destroy(&m->i_caps_mis);
err_i_caps_mis:
	percpu_counter_destroy(&m->i_caps_hit);
err_i_caps_hit:
	percpu_counter_destroy(&m->d_lease_mis);
err_d_lease_mis:
	percpu_counter_destroy(&m->d_lease_hit);

	return ret;
}

void ceph_metric_destroy(struct ceph_client_metric *m)
{
	if (!m)
		return;

	cancel_delayed_work_sync(&m->delayed_work);

	percpu_counter_destroy(&m->total_inodes);
	percpu_counter_destroy(&m->opened_inodes);
	percpu_counter_destroy(&m->i_caps_mis);
	percpu_counter_destroy(&m->i_caps_hit);
	percpu_counter_destroy(&m->d_lease_mis);
	percpu_counter_destroy(&m->d_lease_hit);

	ceph_put_mds_session(m->session);
}

#define METRIC_UPDATE_MIN_MAX(min, max, new)	\
{						\
	if (unlikely(new < min))		\
		min = new;			\
	if (unlikely(new > max))		\
		max = new;			\
}

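/*
 * Welford-style online update: fold one new latency sample into the
 * running average and the running sum of squared deviations, from which
 * the standard deviation can later be derived.
 */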
static inline void __update_mean_and_stdev(ktime_t total, ktime_t *lavg,
					   ktime_t *sq_sump, ktime_t lat)
{
	ktime_t avg;

	if (unlikely(total == 1)) {
		*lavg = lat;
	} else {
		/* the sq is (lat - old_avg) * (lat - new_avg) */
		avg = *lavg + div64_s64(lat - *lavg, total);
		*sq_sump += (lat - *lavg) * (lat - avg);
		*lavg = avg;
	}
}

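/*
 * Record one completed operation: lat = r_end - r_start.  Updates the
 * size and latency sums, the min/max values and the running
 * mean/variance under the metric spinlock.  Failed operations are
 * ignored, except for -ENOENT and -ETIMEDOUT which are still counted.
 */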
void ceph_update_metrics(struct ceph_metric *m,
			 ktime_t r_start, ktime_t r_end,
			 unsigned int size, int rc)
{
	ktime_t lat = ktime_sub(r_end, r_start);
	ktime_t total;

	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
		return;

	spin_lock(&m->lock);
	total = ++m->total;
	m->size_sum += size;
	METRIC_UPDATE_MIN_MAX(m->size_min, m->size_max, size);
	m->latency_sum += lat;
	METRIC_UPDATE_MIN_MAX(m->latency_min, m->latency_max, lat);
	__update_mean_and_stdev(total, &m->latency_avg, &m->latency_sq_sum,
				lat);
	spin_unlock(&m->lock);
}