xref: /linux/drivers/block/drbd/drbd_debugfs.c (revision 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "drbd debugfs: " fmt
3 #include <linux/kernel.h>
4 #include <linux/module.h>
5 #include <linux/debugfs.h>
6 #include <linux/seq_file.h>
7 #include <linux/stat.h>
8 #include <linux/jiffies.h>
9 #include <linux/list.h>
10 
11 #include "drbd_int.h"
12 #include "drbd_req.h"
13 #include "drbd_debugfs.h"
14 
15 
16 /**********************************************************************
17  * Whenever you change the file format, remember to bump the version. *
18  **********************************************************************/
19 
20 static struct dentry *drbd_debugfs_root;
21 static struct dentry *drbd_debugfs_version;
22 static struct dentry *drbd_debugfs_resources;
23 static struct dentry *drbd_debugfs_minors;
24 
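/*
 * Resulting hierarchy, assuming debugfs is mounted at /sys/kernel/debug
 * (a sketch pieced together from the *_add() helpers further down):
 *
 *   drbd/version
 *   drbd/resources/<resource>/in_flight_summary
 *   drbd/resources/<resource>/volumes/<vnr>/{oldest_requests,act_log_extents,
 *                                            resync_extents,data_gen_id,ed_gen_id}
 *   drbd/resources/<resource>/connections/peer/{callback_history,oldest_requests}
 *   drbd/resources/<resource>/connections/peer/<vnr>/	(per peer device)
 *   drbd/minors/<minor> -> ../resources/<resource>/volumes/<vnr>
 */
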
25 static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt)
26 {
27 	if (valid)
28 		seq_printf(m, "\t%d", jiffies_to_msecs(dt));
29 	else
30 		seq_printf(m, "\t-");
31 }
32 
33 static void __seq_print_rq_state_bit(struct seq_file *m,
34 	bool is_set, char *sep, const char *set_name, const char *unset_name)
35 {
36 	if (is_set && set_name) {
37 		seq_putc(m, *sep);
38 		seq_puts(m, set_name);
39 		*sep = '|';
40 	} else if (!is_set && unset_name) {
41 		seq_putc(m, *sep);
42 		seq_puts(m, unset_name);
43 		*sep = '|';
44 	}
45 }
46 
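/* Callers prime sep with ' ': the first flag printed consumes that blank and
 * flips *sep to '|', so subsequent flags are joined with '|'.  If sep is
 * still ' ' afterwards, nothing matched and the caller prints " -" instead. */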
47 static void seq_print_rq_state_bit(struct seq_file *m,
48 	bool is_set, char *sep, const char *set_name)
49 {
50 	__seq_print_rq_state_bit(m, is_set, sep, set_name, NULL);
51 }
52 
53 /* pretty print enum drbd_req_state_bits req->rq_state */
54 static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
55 {
56 	unsigned int s = req->rq_state;
57 	char sep = ' ';
58 	seq_printf(m, "\t0x%08x", s);
59 	seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");
60 
61 	/* RQ_WRITE ignored, already reported */
62 	seq_puts(m, "\tlocal:");
63 	seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL");
64 	seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed");
65 	seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended");
66 	sep = ' ';
67 	seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending");
68 	seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed");
69 	seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted");
70 	seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok");
71 	if (sep == ' ')
72 		seq_puts(m, " -");
73 
74 	/* for_each_connection ... */
75 	seq_printf(m, "\tnet:");
76 	sep = ' ';
77 	seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending");
78 	seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued");
79 	seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent");
80 	seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done");
81 	seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis");
82 	seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok");
83 	if (sep == ' ')
84 		seq_puts(m, " -");
85 
86 	seq_printf(m, " :");
87 	sep = ' ';
88 	seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B");
89 	seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C");
90 	seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr");
91 	if (sep == ' ')
92 		seq_puts(m, " -");
93 	seq_printf(m, "\n");
94 }
95 
96 static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
97 {
98 	/* change anything here, fixup header below! */
99 	unsigned int s = req->rq_state;
100 
101 #define RQ_HDR_1 "epoch\tsector\tsize\trw"
102 	seq_printf(m, "0x%x\t%llu\t%u\t%s",
103 		req->epoch,
104 		(unsigned long long)req->i.sector, req->i.size >> 9,
105 		(s & RQ_WRITE) ? "W" : "R");
106 
107 #define RQ_HDR_2 "\tstart\tin AL\tsubmit"
108 	seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
109 	seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
110 	seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);
111 
112 #define RQ_HDR_3 "\tsent\tacked\tdone"
113 	seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
114 	seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
115 	seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);
116 
117 #define RQ_HDR_4 "\tstate\n"
118 	seq_print_request_state(m, req);
119 }
120 #define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4
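/* RQ_HDR thus expands to the single header line
 *   "epoch\tsector\tsize\trw\tstart\tin AL\tsubmit\tsent\tacked\tdone\tstate\n"
 * matching the columns emitted by seq_print_one_request() above. */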
121 
122 static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
123 {
124 	seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
125 	seq_print_one_request(m, req, now);
126 }
127 
128 static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
129 {
130 	struct drbd_device *device;
131 	unsigned int i;
132 
133 	seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
134 	rcu_read_lock();
135 	idr_for_each_entry(&resource->devices, device, i) {
136 		struct drbd_md_io tmp;
137 		/* In theory this is racy,
138 		 * in the sense that there could have been a
139 		 * drbd_md_put_buffer(); drbd_md_get_buffer();
140 		 * in between our accesses of these members here.  */
141 		tmp = device->md_io;
142 		if (atomic_read(&tmp.in_use)) {
143 			seq_printf(m, "%u\t%u\t%d\t",
144 				device->minor, device->vnr,
145 				jiffies_to_msecs(now - tmp.start_jif));
146 			if (time_before(tmp.submit_jif, tmp.start_jif))
147 				seq_puts(m, "-\t");
148 			else
149 				seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
150 			seq_printf(m, "%s\n", tmp.current_use);
151 		}
152 	}
153 	rcu_read_unlock();
154 }
155 
156 static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
157 {
158 	struct drbd_device *device;
159 	unsigned int i;
160 
161 	seq_puts(m, "minor\tvnr\tage\t#waiting\n");
162 	rcu_read_lock();
163 	idr_for_each_entry(&resource->devices, device, i) {
164 		unsigned long jif;
165 		struct drbd_request *req;
166 		int n = atomic_read(&device->ap_actlog_cnt);
167 		if (n) {
168 			spin_lock_irq(&device->resource->req_lock);
169 			req = list_first_entry_or_null(&device->pending_master_completion[1],
170 				struct drbd_request, req_pending_master_completion);
171 			/* if the oldest request does not wait for the activity log
172 			 * it is not interesting for us here */
173 			if (req && !(req->rq_state & RQ_IN_ACT_LOG))
174 				jif = req->start_jif;
175 			else
176 				req = NULL;
177 			spin_unlock_irq(&device->resource->req_lock);
178 		}
179 		if (n) {
180 			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
181 			if (req)
182 				seq_printf(m, "%u\t", jiffies_to_msecs(now - jif));
183 			else
184 				seq_puts(m, "-\t");
185 			seq_printf(m, "%u\n", n);
186 		}
187 	}
188 	rcu_read_unlock();
189 }
190 
191 static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now)
192 {
193 	struct drbd_bm_aio_ctx *ctx;
194 	unsigned long start_jif;
195 	unsigned int in_flight;
196 	unsigned int flags;
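	/* Take a snapshot of the oldest pending bitmap IO context under
	 * req_lock; the output is only formatted after the lock is dropped. */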
197 	spin_lock_irq(&device->resource->req_lock);
198 	ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list);
199 	if (ctx && ctx->done)
200 		ctx = NULL;
201 	if (ctx) {
202 		start_jif = ctx->start_jif;
203 		in_flight = atomic_read(&ctx->in_flight);
204 		flags = ctx->flags;
205 	}
206 	spin_unlock_irq(&device->resource->req_lock);
207 	if (ctx) {
208 		seq_printf(m, "%u\t%u\t%c\t%u\t%u\n",
209 			device->minor, device->vnr,
210 			(flags & BM_AIO_READ) ? 'R' : 'W',
211 			jiffies_to_msecs(now - start_jif),
212 			in_flight);
213 	}
214 }
215 
216 static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
217 {
218 	struct drbd_device *device;
219 	unsigned int i;
220 
221 	seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
222 	rcu_read_lock();
223 	idr_for_each_entry(&resource->devices, device, i) {
224 		seq_print_device_bitmap_io(m, device, now);
225 	}
226 	rcu_read_unlock();
227 }
228 
229 /* pretty print enum peer_req->flags */
230 static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req)
231 {
232 	unsigned long f = peer_req->flags;
233 	char sep = ' ';
234 
235 	__seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing");
236 	__seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal");
237 	seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL");
238 	seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
239 	seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");
240 	seq_print_rq_state_bit(m, f & EE_TRIM, &sep, "trim");
241 	seq_print_rq_state_bit(m, f & EE_ZEROOUT, &sep, "zero-out");
242 	seq_print_rq_state_bit(m, f & EE_WRITE_SAME, &sep, "write-same");
243 	seq_putc(m, '\n');
244 }
245 
246 static void seq_print_peer_request(struct seq_file *m,
247 	struct drbd_device *device, struct list_head *lh,
248 	unsigned long now)
249 {
250 	bool reported_preparing = false;
251 	struct drbd_peer_request *peer_req;
252 	list_for_each_entry(peer_req, lh, w.list) {
253 		if (reported_preparing && !(peer_req->flags & EE_SUBMITTED))
254 			continue;
255 
256 		if (device)
257 			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
258 
259 		seq_printf(m, "%llu\t%u\t%c\t%u\t",
260 			(unsigned long long)peer_req->i.sector, peer_req->i.size >> 9,
261 			(peer_req->flags & EE_WRITE) ? 'W' : 'R',
262 			jiffies_to_msecs(now - peer_req->submit_jif));
263 		seq_print_peer_request_flags(m, peer_req);
264 		if (peer_req->flags & EE_SUBMITTED)
265 			break;
266 		else
267 			reported_preparing = true;
268 	}
269 }
270 
271 static void seq_print_device_peer_requests(struct seq_file *m,
272 	struct drbd_device *device, unsigned long now)
273 {
274 	seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n");
275 	spin_lock_irq(&device->resource->req_lock);
276 	seq_print_peer_request(m, device, &device->active_ee, now);
277 	seq_print_peer_request(m, device, &device->read_ee, now);
278 	seq_print_peer_request(m, device, &device->sync_ee, now);
279 	spin_unlock_irq(&device->resource->req_lock);
280 	if (test_bit(FLUSH_PENDING, &device->flags)) {
281 		seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n",
282 			device->minor, device->vnr,
283 			jiffies_to_msecs(now - device->flush_jif));
284 	}
285 }
286 
287 static void seq_print_resource_pending_peer_requests(struct seq_file *m,
288 	struct drbd_resource *resource, unsigned long now)
289 {
290 	struct drbd_device *device;
291 	unsigned int i;
292 
293 	rcu_read_lock();
294 	idr_for_each_entry(&resource->devices, device, i) {
295 		seq_print_device_peer_requests(m, device, now);
296 	}
297 	rcu_read_unlock();
298 }
299 
300 static void seq_print_resource_transfer_log_summary(struct seq_file *m,
301 	struct drbd_resource *resource,
302 	struct drbd_connection *connection,
303 	unsigned long now)
304 {
305 	struct drbd_request *req;
306 	unsigned int count = 0;
307 	unsigned int show_state = 0;
308 
309 	seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR);
310 	spin_lock_irq(&resource->req_lock);
311 	list_for_each_entry(req, &connection->transfer_log, tl_requests) {
312 		unsigned int tmp = 0;
313 		unsigned int s;
314 		++count;
315 
316 		/* don't disable irq "forever" */
317 		if (!(count & 0x1ff)) {
318 			struct drbd_request *req_next;
319 			kref_get(&req->kref);
320 			spin_unlock_irq(&resource->req_lock);
321 			cond_resched();
322 			spin_lock_irq(&resource->req_lock);
323 			req_next = list_next_entry(req, tl_requests);
324 			if (kref_put(&req->kref, drbd_req_destroy))
325 				req = req_next;
326 			if (&req->tl_requests == &connection->transfer_log)
327 				break;
328 		}
329 
330 		s = req->rq_state;
331 
332 		/* This is meant to summarize timing issues, to be able to tell
333 		 * local disk problems from network problems.
334 		 * Skip requests if we have already shown an even older
335 		 * request with similar aspects.  */
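		/* Rough meaning of the classification bits set below:
		 *    1  master bio already completed
		 *    2  has a local part that is still pending on the disk
		 *    4  has a network part that has not been sent yet
		 *    8  still pending on the network (e.g. waiting for an ack)
		 *   16  network part not yet done
		 * Once show_state reaches 0x1f, one example of each class has
		 * been printed and the scan stops. */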
336 		if (req->master_bio == NULL)
337 			tmp |= 1;
338 		if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
339 			tmp |= 2;
340 		if (s & RQ_NET_MASK) {
341 			if (!(s & RQ_NET_SENT))
342 				tmp |= 4;
343 			if (s & RQ_NET_PENDING)
344 				tmp |= 8;
345 			if (!(s & RQ_NET_DONE))
346 				tmp |= 16;
347 		}
348 		if ((tmp & show_state) == tmp)
349 			continue;
350 		show_state |= tmp;
351 		seq_printf(m, "%u\t", count);
352 		seq_print_minor_vnr_req(m, req, now);
353 		if (show_state == 0x1f)
354 			break;
355 	}
356 	spin_unlock_irq(&resource->req_lock);
357 }
358 
359 /* TODO: transfer_log and friends should be moved to resource */
360 static int in_flight_summary_show(struct seq_file *m, void *pos)
361 {
362 	struct drbd_resource *resource = m->private;
363 	struct drbd_connection *connection;
364 	unsigned long jif = jiffies;
365 
366 	connection = first_connection(resource);
367 	/* This does not happen, actually.
368 	 * But be robust and prepare for future code changes. */
369 	if (!connection || !kref_get_unless_zero(&connection->kref))
370 		return -ESTALE;
371 
372 	/* BUMP me if you change the file format/content/presentation */
373 	seq_printf(m, "v: %u\n\n", 0);
374 
375 	seq_puts(m, "oldest bitmap IO\n");
376 	seq_print_resource_pending_bitmap_io(m, resource, jif);
377 	seq_putc(m, '\n');
378 
379 	seq_puts(m, "meta data IO\n");
380 	seq_print_resource_pending_meta_io(m, resource, jif);
381 	seq_putc(m, '\n');
382 
383 	seq_puts(m, "socket buffer stats\n");
384 	/* for each connection ... once we have more than one */
385 	rcu_read_lock();
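	/* rcv_nxt - copied_seq: bytes received by TCP but not yet read from
	 * the socket; write_seq - snd_una: bytes queued for sending but not
	 * yet acknowledged by the peer. */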
386 	if (connection->data.socket) {
387 		/* open coded SIOCINQ, the "relevant" part */
388 		struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
389 		int answ = tp->rcv_nxt - tp->copied_seq;
390 		seq_printf(m, "unread receive buffer: %u Byte\n", answ);
391 		/* open coded SIOCOUTQ, the "relevant" part */
392 		answ = tp->write_seq - tp->snd_una;
393 		seq_printf(m, "unacked send buffer: %u Byte\n", answ);
394 	}
395 	rcu_read_unlock();
396 	seq_putc(m, '\n');
397 
398 	seq_puts(m, "oldest peer requests\n");
399 	seq_print_resource_pending_peer_requests(m, resource, jif);
400 	seq_putc(m, '\n');
401 
402 	seq_puts(m, "application requests waiting for activity log\n");
403 	seq_print_waiting_for_AL(m, resource, jif);
404 	seq_putc(m, '\n');
405 
406 	seq_puts(m, "oldest application requests\n");
407 	seq_print_resource_transfer_log_summary(m, resource, connection, jif);
408 	seq_putc(m, '\n');
409 
410 	jif = jiffies - jif;
411 	if (jif)
412 		seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
413 	kref_put(&connection->kref, drbd_destroy_connection);
414 	return 0;
415 }
416 
417 /* make sure at *open* time that the respective object won't go away. */
418 static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
419 		                void *data, struct kref *kref,
420 				void (*release)(struct kref *))
421 {
422 	struct dentry *parent;
423 	int ret = -ESTALE;
424 
425 	/* Are we still linked,
426 	 * or has debugfs_remove() already been called? */
427 	parent = file->f_path.dentry->d_parent;
428 	/* serialize with d_delete() */
429 	inode_lock(d_inode(parent));
430 	/* Make sure the object is still alive */
431 	if (simple_positive(file->f_path.dentry)
432 	&& kref_get_unless_zero(kref))
433 		ret = 0;
434 	inode_unlock(d_inode(parent));
435 	if (!ret) {
436 		ret = single_open(file, show, data);
437 		if (ret)
438 			kref_put(kref, release);
439 	}
440 	return ret;
441 }
442 
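/* Each *_open() below takes a reference through drbd_single_open(); the
 * matching *_release() drops it again before chaining to single_release(). */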
443 static int in_flight_summary_open(struct inode *inode, struct file *file)
444 {
445 	struct drbd_resource *resource = inode->i_private;
446 	return drbd_single_open(file, in_flight_summary_show, resource,
447 				&resource->kref, drbd_destroy_resource);
448 }
449 
450 static int in_flight_summary_release(struct inode *inode, struct file *file)
451 {
452 	struct drbd_resource *resource = inode->i_private;
453 	kref_put(&resource->kref, drbd_destroy_resource);
454 	return single_release(inode, file);
455 }
456 
457 static const struct file_operations in_flight_summary_fops = {
458 	.owner		= THIS_MODULE,
459 	.open		= in_flight_summary_open,
460 	.read		= seq_read,
461 	.llseek		= seq_lseek,
462 	.release	= in_flight_summary_release,
463 };
464 
465 void drbd_debugfs_resource_add(struct drbd_resource *resource)
466 {
467 	struct dentry *dentry;
468 
469 	dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
470 	resource->debugfs_res = dentry;
471 
472 	dentry = debugfs_create_dir("volumes", resource->debugfs_res);
473 	resource->debugfs_res_volumes = dentry;
474 
475 	dentry = debugfs_create_dir("connections", resource->debugfs_res);
476 	resource->debugfs_res_connections = dentry;
477 
478 	dentry = debugfs_create_file("in_flight_summary", 0440,
479 				     resource->debugfs_res, resource,
480 				     &in_flight_summary_fops);
481 	resource->debugfs_res_in_flight_summary = dentry;
482 }
483 
484 static void drbd_debugfs_remove(struct dentry **dp)
485 {
486 	debugfs_remove(*dp);
487 	*dp = NULL;
488 }
489 
490 void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
491 {
492 	/* it is ok to call debugfs_remove(NULL) */
493 	drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
494 	drbd_debugfs_remove(&resource->debugfs_res_connections);
495 	drbd_debugfs_remove(&resource->debugfs_res_volumes);
496 	drbd_debugfs_remove(&resource->debugfs_res);
497 }
498 
499 static void seq_print_one_timing_detail(struct seq_file *m,
500 	const struct drbd_thread_timing_details *tdp,
501 	unsigned long now)
502 {
503 	struct drbd_thread_timing_details td;
504 	/* No locking...
505 	 * use temporary assignment to get at consistent data. */
506 	do {
507 		td = *tdp;
508 	} while (td.cb_nr != tdp->cb_nr);
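	/* If cb_nr changed while we copied, a writer updated this slot
	 * concurrently; retry until the copy is self-consistent. */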
509 	if (!td.cb_addr)
510 		return;
511 	seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
512 			td.cb_nr,
513 			jiffies_to_msecs(now - td.start_jif),
514 			td.caller_fn, td.line,
515 			td.cb_addr);
516 }
517 
518 static void seq_print_timing_details(struct seq_file *m,
519 		const char *title,
520 		unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now)
521 {
522 	unsigned int start_idx;
523 	unsigned int i;
524 
525 	seq_printf(m, "%s\n", title);
526 	/* If not much is going on, this will result in natural ordering.
527 	 * If it is very busy, we will possibly skip events, or even see wrap
528 	 * arounds, which could only be avoided with locking.
529 	 */
530 	start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST;
531 	for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
532 		seq_print_one_timing_detail(m, tdp+i, now);
533 	for (i = 0; i < start_idx; i++)
534 		seq_print_one_timing_detail(m, tdp+i, now);
535 }
536 
537 static int callback_history_show(struct seq_file *m, void *ignored)
538 {
539 	struct drbd_connection *connection = m->private;
540 	unsigned long jif = jiffies;
541 
542 	/* BUMP me if you change the file format/content/presentation */
543 	seq_printf(m, "v: %u\n\n", 0);
544 
545 	seq_puts(m, "n\tage\tcallsite\tfn\n");
546 	seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif);
547 	seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif);
548 	return 0;
549 }
550 
551 static int callback_history_open(struct inode *inode, struct file *file)
552 {
553 	struct drbd_connection *connection = inode->i_private;
554 	return drbd_single_open(file, callback_history_show, connection,
555 				&connection->kref, drbd_destroy_connection);
556 }
557 
558 static int callback_history_release(struct inode *inode, struct file *file)
559 {
560 	struct drbd_connection *connection = inode->i_private;
561 	kref_put(&connection->kref, drbd_destroy_connection);
562 	return single_release(inode, file);
563 }
564 
565 static const struct file_operations connection_callback_history_fops = {
566 	.owner		= THIS_MODULE,
567 	.open		= callback_history_open,
568 	.read		= seq_read,
569 	.llseek		= seq_lseek,
570 	.release	= callback_history_release,
571 };
572 
573 static int connection_oldest_requests_show(struct seq_file *m, void *ignored)
574 {
575 	struct drbd_connection *connection = m->private;
576 	unsigned long now = jiffies;
577 	struct drbd_request *r1, *r2;
578 
579 	/* BUMP me if you change the file format/content/presentation */
580 	seq_printf(m, "v: %u\n\n", 0);
581 
582 	spin_lock_irq(&connection->resource->req_lock);
583 	r1 = connection->req_next;
584 	if (r1)
585 		seq_print_minor_vnr_req(m, r1, now);
586 	r2 = connection->req_ack_pending;
587 	if (r2 && r2 != r1) {
588 		r1 = r2;
589 		seq_print_minor_vnr_req(m, r1, now);
590 	}
591 	r2 = connection->req_not_net_done;
592 	if (r2 && r2 != r1)
593 		seq_print_minor_vnr_req(m, r2, now);
594 	spin_unlock_irq(&connection->resource->req_lock);
595 	return 0;
596 }
597 
598 static int connection_oldest_requests_open(struct inode *inode, struct file *file)
599 {
600 	struct drbd_connection *connection = inode->i_private;
601 	return drbd_single_open(file, connection_oldest_requests_show, connection,
602 				&connection->kref, drbd_destroy_connection);
603 }
604 
605 static int connection_oldest_requests_release(struct inode *inode, struct file *file)
606 {
607 	struct drbd_connection *connection = inode->i_private;
608 	kref_put(&connection->kref, drbd_destroy_connection);
609 	return single_release(inode, file);
610 }
611 
612 static const struct file_operations connection_oldest_requests_fops = {
613 	.owner		= THIS_MODULE,
614 	.open		= connection_oldest_requests_open,
615 	.read		= seq_read,
616 	.llseek		= seq_lseek,
617 	.release	= connection_oldest_requests_release,
618 };
619 
620 void drbd_debugfs_connection_add(struct drbd_connection *connection)
621 {
622 	struct dentry *conns_dir = connection->resource->debugfs_res_connections;
623 	struct dentry *dentry;
624 
625 	/* Once we enable multiple peers,
626 	 * these connections will have descriptive names.
627 	 * For now, it is just the one connection to the (only) "peer". */
628 	dentry = debugfs_create_dir("peer", conns_dir);
629 	connection->debugfs_conn = dentry;
630 
631 	dentry = debugfs_create_file("callback_history", 0440,
632 				     connection->debugfs_conn, connection,
633 				     &connection_callback_history_fops);
634 	connection->debugfs_conn_callback_history = dentry;
635 
636 	dentry = debugfs_create_file("oldest_requests", 0440,
637 				     connection->debugfs_conn, connection,
638 				     &connection_oldest_requests_fops);
639 	connection->debugfs_conn_oldest_requests = dentry;
640 }
641 
642 void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
643 {
644 	drbd_debugfs_remove(&connection->debugfs_conn_callback_history);
645 	drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests);
646 	drbd_debugfs_remove(&connection->debugfs_conn);
647 }
648 
649 static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
650 {
651 	struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);
652 
653 	seq_printf(m, "%5d %s %s %s", bme->rs_left,
654 		   test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
655 		   test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
656 		   test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"
657 		   );
658 }
659 
660 static int device_resync_extents_show(struct seq_file *m, void *ignored)
661 {
662 	struct drbd_device *device = m->private;
663 
664 	/* BUMP me if you change the file format/content/presentation */
665 	seq_printf(m, "v: %u\n\n", 0);
666 
667 	if (get_ldev_if_state(device, D_FAILED)) {
668 		lc_seq_printf_stats(m, device->resync);
669 		lc_seq_dump_details(m, device->resync, "rs_left flags", resync_dump_detail);
670 		put_ldev(device);
671 	}
672 	return 0;
673 }
674 
675 static int device_act_log_extents_show(struct seq_file *m, void *ignored)
676 {
677 	struct drbd_device *device = m->private;
678 
679 	/* BUMP me if you change the file format/content/presentation */
680 	seq_printf(m, "v: %u\n\n", 0);
681 
682 	if (get_ldev_if_state(device, D_FAILED)) {
683 		lc_seq_printf_stats(m, device->act_log);
684 		lc_seq_dump_details(m, device->act_log, "", NULL);
685 		put_ldev(device);
686 	}
687 	return 0;
688 }
689 
690 static int device_oldest_requests_show(struct seq_file *m, void *ignored)
691 {
692 	struct drbd_device *device = m->private;
693 	struct drbd_resource *resource = device->resource;
694 	unsigned long now = jiffies;
695 	struct drbd_request *r1, *r2;
696 	int i;
697 
698 	/* BUMP me if you change the file format/content/presentation */
699 	seq_printf(m, "v: %u\n\n", 0);
700 
701 	seq_puts(m, RQ_HDR);
702 	spin_lock_irq(&resource->req_lock);
703 	/* WRITE, then READ */
704 	for (i = 1; i >= 0; --i) {
705 		r1 = list_first_entry_or_null(&device->pending_master_completion[i],
706 			struct drbd_request, req_pending_master_completion);
707 		r2 = list_first_entry_or_null(&device->pending_completion[i],
708 			struct drbd_request, req_pending_local);
709 		if (r1)
710 			seq_print_one_request(m, r1, now);
711 		if (r2 && r2 != r1)
712 			seq_print_one_request(m, r2, now);
713 	}
714 	spin_unlock_irq(&resource->req_lock);
715 	return 0;
716 }
717 
718 static int device_data_gen_id_show(struct seq_file *m, void *ignored)
719 {
720 	struct drbd_device *device = m->private;
721 	struct drbd_md *md;
722 	enum drbd_uuid_index idx;
723 
724 	if (!get_ldev_if_state(device, D_FAILED))
725 		return -ENODEV;
726 
727 	md = &device->ldev->md;
728 	spin_lock_irq(&md->uuid_lock);
729 	for (idx = UI_CURRENT; idx <= UI_HISTORY_END; idx++) {
730 		seq_printf(m, "0x%016llX\n", md->uuid[idx]);
731 	}
732 	spin_unlock_irq(&md->uuid_lock);
733 	put_ldev(device);
734 	return 0;
735 }
736 
737 static int device_ed_gen_id_show(struct seq_file *m, void *ignored)
738 {
739 	struct drbd_device *device = m->private;
740 	seq_printf(m, "0x%016llX\n", (unsigned long long)device->ed_uuid);
741 	return 0;
742 }
743 
744 #define drbd_debugfs_device_attr(name)						\
745 static int device_ ## name ## _open(struct inode *inode, struct file *file)	\
746 {										\
747 	struct drbd_device *device = inode->i_private;				\
748 	return drbd_single_open(file, device_ ## name ## _show, device,		\
749 				&device->kref, drbd_destroy_device);		\
750 }										\
751 static int device_ ## name ## _release(struct inode *inode, struct file *file)	\
752 {										\
753 	struct drbd_device *device = inode->i_private;				\
754 	kref_put(&device->kref, drbd_destroy_device);				\
755 	return single_release(inode, file);					\
756 }										\
757 static const struct file_operations device_ ## name ## _fops = {		\
758 	.owner		= THIS_MODULE,						\
759 	.open		= device_ ## name ## _open,				\
760 	.read		= seq_read,						\
761 	.llseek		= seq_lseek,						\
762 	.release	= device_ ## name ## _release,				\
763 };
764 
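/* Each expansion below generates device_<name>_open(), device_<name>_release()
 * and device_<name>_fops for one per-volume debugfs file. */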
765 drbd_debugfs_device_attr(oldest_requests)
766 drbd_debugfs_device_attr(act_log_extents)
767 drbd_debugfs_device_attr(resync_extents)
768 drbd_debugfs_device_attr(data_gen_id)
769 drbd_debugfs_device_attr(ed_gen_id)
770 
771 void drbd_debugfs_device_add(struct drbd_device *device)
772 {
773 	struct dentry *vols_dir = device->resource->debugfs_res_volumes;
774 	char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */
775 	char vnr_buf[8];   /* the volume number vnr is only 16 bit anyway; */
776 	char *slink_name = NULL;
777 
778 	struct dentry *dentry;
779 	if (!vols_dir || !drbd_debugfs_minors)
780 		return;
781 
782 	snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
783 	dentry = debugfs_create_dir(vnr_buf, vols_dir);
784 	device->debugfs_vol = dentry;
785 
786 	snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
787 	slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u",
788 			device->resource->name, device->vnr);
789 	if (!slink_name)
790 		goto fail;
791 	dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
792 	device->debugfs_minor = dentry;
793 	kfree(slink_name);
794 	slink_name = NULL;
795 
796 #define DCF(name)	do {					\
797 	dentry = debugfs_create_file(#name, 0440,	\
798 			device->debugfs_vol, device,		\
799 			&device_ ## name ## _fops);		\
800 	device->debugfs_vol_ ## name = dentry;			\
801 	} while (0)
802 
803 	DCF(oldest_requests);
804 	DCF(act_log_extents);
805 	DCF(resync_extents);
806 	DCF(data_gen_id);
807 	DCF(ed_gen_id);
808 #undef DCF
809 	return;
810 
811 fail:
812 	drbd_debugfs_device_cleanup(device);
813 	drbd_err(device, "failed to create debugfs entries\n");
814 }
815 
816 void drbd_debugfs_device_cleanup(struct drbd_device *device)
817 {
818 	drbd_debugfs_remove(&device->debugfs_minor);
819 	drbd_debugfs_remove(&device->debugfs_vol_oldest_requests);
820 	drbd_debugfs_remove(&device->debugfs_vol_act_log_extents);
821 	drbd_debugfs_remove(&device->debugfs_vol_resync_extents);
822 	drbd_debugfs_remove(&device->debugfs_vol_data_gen_id);
823 	drbd_debugfs_remove(&device->debugfs_vol_ed_gen_id);
824 	drbd_debugfs_remove(&device->debugfs_vol);
825 }
826 
827 void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
828 {
829 	struct dentry *conn_dir = peer_device->connection->debugfs_conn;
830 	struct dentry *dentry;
831 	char vnr_buf[8];
832 
833 	snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
834 	dentry = debugfs_create_dir(vnr_buf, conn_dir);
835 	peer_device->debugfs_peer_dev = dentry;
836 }
837 
838 void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
839 {
840 	drbd_debugfs_remove(&peer_device->debugfs_peer_dev);
841 }
842 
843 static int drbd_version_show(struct seq_file *m, void *ignored)
844 {
845 	seq_printf(m, "# %s\n", drbd_buildtag());
846 	seq_printf(m, "VERSION=%s\n", REL_VERSION);
847 	seq_printf(m, "API_VERSION=%u\n", GENL_MAGIC_VERSION);
848 	seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN);
849 	seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX);
850 	return 0;
851 }
852 
853 static int drbd_version_open(struct inode *inode, struct file *file)
854 {
855 	return single_open(file, drbd_version_show, NULL);
856 }
857 
858 static const struct file_operations drbd_version_fops = {
859 	.owner = THIS_MODULE,
860 	.open = drbd_version_open,
861 	.llseek = seq_lseek,
862 	.read = seq_read,
863 	.release = single_release,
864 };
865 
866 /* not __exit, may be indirectly called
867  * from the module-load-failure path as well. */
868 void drbd_debugfs_cleanup(void)
869 {
870 	drbd_debugfs_remove(&drbd_debugfs_resources);
871 	drbd_debugfs_remove(&drbd_debugfs_minors);
872 	drbd_debugfs_remove(&drbd_debugfs_version);
873 	drbd_debugfs_remove(&drbd_debugfs_root);
874 }
875 
876 void __init drbd_debugfs_init(void)
877 {
878 	struct dentry *dentry;
879 
880 	dentry = debugfs_create_dir("drbd", NULL);
881 	drbd_debugfs_root = dentry;
882 
883 	dentry = debugfs_create_file("version", 0444, drbd_debugfs_root, NULL, &drbd_version_fops);
884 	drbd_debugfs_version = dentry;
885 
886 	dentry = debugfs_create_dir("resources", drbd_debugfs_root);
887 	drbd_debugfs_resources = dentry;
888 
889 	dentry = debugfs_create_dir("minors", drbd_debugfs_root);
890 	drbd_debugfs_minors = dentry;
891 }
892