xref: /linux/drivers/block/drbd/drbd_debugfs.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 #define pr_fmt(fmt) "drbd debugfs: " fmt
2 #include <linux/kernel.h>
3 #include <linux/module.h>
4 #include <linux/debugfs.h>
5 #include <linux/seq_file.h>
6 #include <linux/stat.h>
7 #include <linux/jiffies.h>
8 #include <linux/list.h>
9 
10 #include "drbd_int.h"
11 #include "drbd_req.h"
12 #include "drbd_debugfs.h"
13 
14 
15 /**********************************************************************
16  * Whenever you change the file format, remember to bump the version. *
17  **********************************************************************/
18 
/* Top-level debugfs entries, created once in drbd_debugfs_init():
 *   /sys/kernel/debug/drbd/            (drbd_debugfs_root)
 *     version                          (drbd_debugfs_version)
 *     resources/<name>/...             (drbd_debugfs_resources)
 *     minors/<minor> -> symlink        (drbd_debugfs_minors)
 */
static struct dentry *drbd_debugfs_root;
static struct dentry *drbd_debugfs_version;
static struct dentry *drbd_debugfs_resources;
static struct dentry *drbd_debugfs_minors;
23 
24 static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt)
25 {
26 	if (valid)
27 		seq_printf(m, "\t%d", jiffies_to_msecs(dt));
28 	else
29 		seq_printf(m, "\t-");
30 }
31 
32 static void __seq_print_rq_state_bit(struct seq_file *m,
33 	bool is_set, char *sep, const char *set_name, const char *unset_name)
34 {
35 	if (is_set && set_name) {
36 		seq_putc(m, *sep);
37 		seq_puts(m, set_name);
38 		*sep = '|';
39 	} else if (!is_set && unset_name) {
40 		seq_putc(m, *sep);
41 		seq_puts(m, unset_name);
42 		*sep = '|';
43 	}
44 }
45 
/* Convenience wrapper: print @set_name only when the bit is set,
 * print nothing when it is clear. */
static void seq_print_rq_state_bit(struct seq_file *m,
	bool is_set, char *sep, const char *set_name)
{
	__seq_print_rq_state_bit(m, is_set, sep, set_name, NULL);
}
51 
52 /* pretty print enum drbd_req_state_bits req->rq_state */
53 static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
54 {
55 	unsigned int s = req->rq_state;
56 	char sep = ' ';
57 	seq_printf(m, "\t0x%08x", s);
58 	seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");
59 
60 	/* RQ_WRITE ignored, already reported */
61 	seq_puts(m, "\tlocal:");
62 	seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL");
63 	seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed");
64 	seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended");
65 	sep = ' ';
66 	seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending");
67 	seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed");
68 	seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted");
69 	seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok");
70 	if (sep == ' ')
71 		seq_puts(m, " -");
72 
73 	/* for_each_connection ... */
74 	seq_printf(m, "\tnet:");
75 	sep = ' ';
76 	seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending");
77 	seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued");
78 	seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent");
79 	seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done");
80 	seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis");
81 	seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok");
82 	if (sep == ' ')
83 		seq_puts(m, " -");
84 
85 	seq_printf(m, " :");
86 	sep = ' ';
87 	seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B");
88 	seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C");
89 	seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr");
90 	if (sep == ' ')
91 		seq_puts(m, " -");
92 	seq_printf(m, "\n");
93 }
94 
95 static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
96 {
97 	/* change anything here, fixup header below! */
98 	unsigned int s = req->rq_state;
99 
100 #define RQ_HDR_1 "epoch\tsector\tsize\trw"
101 	seq_printf(m, "0x%x\t%llu\t%u\t%s",
102 		req->epoch,
103 		(unsigned long long)req->i.sector, req->i.size >> 9,
104 		(s & RQ_WRITE) ? "W" : "R");
105 
106 #define RQ_HDR_2 "\tstart\tin AL\tsubmit"
107 	seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
108 	seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
109 	seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);
110 
111 #define RQ_HDR_3 "\tsent\tacked\tdone"
112 	seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
113 	seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
114 	seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);
115 
116 #define RQ_HDR_4 "\tstate\n"
117 	seq_print_request_state(m, req);
118 }
119 #define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4
120 
/* Like seq_print_one_request(), but prefix the line with
 * "<minor>\t<vnr>\t" identifying the device the request belongs to. */
static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
	seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
	seq_print_one_request(m, req, now);
}
126 
127 static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
128 {
129 	struct drbd_device *device;
130 	unsigned int i;
131 
132 	seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
133 	rcu_read_lock();
134 	idr_for_each_entry(&resource->devices, device, i) {
135 		struct drbd_md_io tmp;
136 		/* In theory this is racy,
137 		 * in the sense that there could have been a
138 		 * drbd_md_put_buffer(); drbd_md_get_buffer();
139 		 * between accessing these members here.  */
140 		tmp = device->md_io;
141 		if (atomic_read(&tmp.in_use)) {
142 			seq_printf(m, "%u\t%u\t%d\t",
143 				device->minor, device->vnr,
144 				jiffies_to_msecs(now - tmp.start_jif));
145 			if (time_before(tmp.submit_jif, tmp.start_jif))
146 				seq_puts(m, "-\t");
147 			else
148 				seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
149 			seq_printf(m, "%s\n", tmp.current_use);
150 		}
151 	}
152 	rcu_read_unlock();
153 }
154 
/* For each device, report how many application requests are currently
 * waiting for activity log transactions, and the age of the oldest one
 * (or "-" if the oldest pending request is not actually AL-bound). */
static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\tage\t#waiting\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		unsigned long jif;
		struct drbd_request *req;
		int n = atomic_read(&device->ap_actlog_cnt);
		if (n) {
			/* gather the oldest WRITE ([1]) under the lock ... */
			spin_lock_irq(&device->resource->req_lock);
			req = list_first_entry_or_null(&device->pending_master_completion[1],
				struct drbd_request, req_pending_master_completion);
			/* if the oldest request does not wait for the activity log
			 * it is not interesting for us here */
			if (req && !(req->rq_state & RQ_IN_ACT_LOG))
				jif = req->start_jif;
			else
				req = NULL;
			spin_unlock_irq(&device->resource->req_lock);
		}
		/* ... and print outside of it; jif is only valid if req != NULL */
		if (n) {
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
			if (req)
				seq_printf(m, "%u\t", jiffies_to_msecs(now - jif));
			else
				seq_puts(m, "-\t");
			seq_printf(m, "%u\n", n);
		}
	}
	rcu_read_unlock();
}
189 
/* Print one line for the oldest still-pending bitmap IO of @device,
 * or nothing if there is none.  The interesting members are snapshotted
 * under req_lock; after unlock, ctx is only used as a "found one" flag,
 * never dereferenced again. */
static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now)
{
	struct drbd_bm_aio_ctx *ctx;
	unsigned long start_jif;
	unsigned int in_flight;
	unsigned int flags;
	spin_lock_irq(&device->resource->req_lock);
	ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list);
	/* an already-completed context is not "pending" for our purposes */
	if (ctx && ctx->done)
		ctx = NULL;
	if (ctx) {
		start_jif = ctx->start_jif;
		in_flight = atomic_read(&ctx->in_flight);
		flags = ctx->flags;
	}
	spin_unlock_irq(&device->resource->req_lock);
	if (ctx) {
		seq_printf(m, "%u\t%u\t%c\t%u\t%u\n",
			device->minor, device->vnr,
			(flags & BM_AIO_READ) ? 'R' : 'W',
			jiffies_to_msecs(now - start_jif),
			in_flight);
	}
}
214 
215 static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
216 {
217 	struct drbd_device *device;
218 	unsigned int i;
219 
220 	seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
221 	rcu_read_lock();
222 	idr_for_each_entry(&resource->devices, device, i) {
223 		seq_print_device_bitmap_io(m, device, now);
224 	}
225 	rcu_read_unlock();
226 }
227 
228 /* pretty print enum peer_req->flags */
229 static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req)
230 {
231 	unsigned long f = peer_req->flags;
232 	char sep = ' ';
233 
234 	__seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing");
235 	__seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal");
236 	seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL");
237 	seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
238 	seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");
239 
240 	if (f & EE_IS_TRIM) {
241 		seq_putc(m, sep);
242 		sep = '|';
243 		if (f & EE_IS_TRIM_USE_ZEROOUT)
244 			seq_puts(m, "zero-out");
245 		else
246 			seq_puts(m, "trim");
247 	}
248 	seq_putc(m, '\n');
249 }
250 
/* Walk one peer-request list (oldest first) and print entries up to and
 * including the first submitted one.  Of the not-yet-submitted
 * ("preparing") entries, only the first is reported; later preparing
 * entries are skipped.  Caller holds req_lock. */
static void seq_print_peer_request(struct seq_file *m,
	struct drbd_device *device, struct list_head *lh,
	unsigned long now)
{
	bool reported_preparing = false;
	struct drbd_peer_request *peer_req;
	list_for_each_entry(peer_req, lh, w.list) {
		/* already reported one preparing entry; skip further ones */
		if (reported_preparing && !(peer_req->flags & EE_SUBMITTED))
			continue;

		/* device may be NULL when called for a list not tied to one */
		if (device)
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);

		seq_printf(m, "%llu\t%u\t%c\t%u\t",
			(unsigned long long)peer_req->i.sector, peer_req->i.size >> 9,
			(peer_req->flags & EE_WRITE) ? 'W' : 'R',
			jiffies_to_msecs(now - peer_req->submit_jif));
		seq_print_peer_request_flags(m, peer_req);
		/* first submitted entry terminates the listing */
		if (peer_req->flags & EE_SUBMITTED)
			break;
		else
			reported_preparing = true;
	}
}
275 
/* Print the oldest peer requests of one device: active, read and sync
 * lists (in that fixed order) under req_lock, followed by a pseudo
 * entry if a disk flush is still pending. */
static void seq_print_device_peer_requests(struct seq_file *m,
	struct drbd_device *device, unsigned long now)
{
	seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n");
	spin_lock_irq(&device->resource->req_lock);
	seq_print_peer_request(m, device, &device->active_ee, now);
	seq_print_peer_request(m, device, &device->read_ee, now);
	seq_print_peer_request(m, device, &device->sync_ee, now);
	spin_unlock_irq(&device->resource->req_lock);
	if (test_bit(FLUSH_PENDING, &device->flags)) {
		/* synthetic line: a flush has no sector/size */
		seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n",
			device->minor, device->vnr,
			jiffies_to_msecs(now - device->flush_jif));
	}
}
291 
292 static void seq_print_resource_pending_peer_requests(struct seq_file *m,
293 	struct drbd_resource *resource, unsigned long now)
294 {
295 	struct drbd_device *device;
296 	unsigned int i;
297 
298 	rcu_read_lock();
299 	idr_for_each_entry(&resource->devices, device, i) {
300 		seq_print_device_peer_requests(m, device, now);
301 	}
302 	rcu_read_unlock();
303 }
304 
/* Summarize the transfer log: walk all requests oldest-first, but print
 * only the first request exhibiting each "interesting aspect" (bits in
 * show_state below), so the output stays short even for huge logs.
 * Meant to help tell local disk problems from network problems. */
static void seq_print_resource_transfer_log_summary(struct seq_file *m,
	struct drbd_resource *resource,
	struct drbd_connection *connection,
	unsigned long now)
{
	struct drbd_request *req;
	unsigned int count = 0;
	unsigned int show_state = 0;

	seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR);
	spin_lock_irq(&resource->req_lock);
	list_for_each_entry(req, &connection->transfer_log, tl_requests) {
		unsigned int tmp = 0;
		unsigned int s;
		++count;

		/* don't disable irq "forever":
		 * every 512 requests, pin the current one with a kref,
		 * drop the lock, reschedule, then re-take the lock. */
		if (!(count & 0x1ff)) {
			struct drbd_request *req_next;
			kref_get(&req->kref);
			spin_unlock_irq(&resource->req_lock);
			cond_resched();
			spin_lock_irq(&resource->req_lock);
			req_next = list_next_entry(req, tl_requests);
			/* if we held the last reference, req was unlinked
			 * meanwhile; continue from its (old) successor */
			if (kref_put(&req->kref, drbd_req_destroy))
				req = req_next;
			/* reached the list head: done */
			if (&req->tl_requests == &connection->transfer_log)
				break;
		}

		s = req->rq_state;

		/* This is meant to summarize timing issues, to be able to tell
		 * local disk problems from network problems.
		 * Skip requests, if we have shown an even older request with
		 * similar aspects already.  */
		if (req->master_bio == NULL)
			tmp |= 1;	/* master bio already completed */
		if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
			tmp |= 2;	/* local IO still pending */
		if (s & RQ_NET_MASK) {
			if (!(s & RQ_NET_SENT))
				tmp |= 4;	/* not yet sent */
			if (s & RQ_NET_PENDING)
				tmp |= 8;	/* waiting for peer ack */
			if (!(s & RQ_NET_DONE))
				tmp |= 16;	/* network part not done */
		}
		/* nothing new compared to requests already shown */
		if ((tmp & show_state) == tmp)
			continue;
		show_state |= tmp;
		seq_printf(m, "%u\t", count);
		seq_print_minor_vnr_req(m, req, now);
		/* all aspects shown at least once: we are done */
		if (show_state == 0x1f)
			break;
	}
	spin_unlock_irq(&resource->req_lock);
}
363 
364 /* TODO: transfer_log and friends should be moved to resource */
365 static int in_flight_summary_show(struct seq_file *m, void *pos)
366 {
367 	struct drbd_resource *resource = m->private;
368 	struct drbd_connection *connection;
369 	unsigned long jif = jiffies;
370 
371 	connection = first_connection(resource);
372 	/* This does not happen, actually.
373 	 * But be robust and prepare for future code changes. */
374 	if (!connection || !kref_get_unless_zero(&connection->kref))
375 		return -ESTALE;
376 
377 	/* BUMP me if you change the file format/content/presentation */
378 	seq_printf(m, "v: %u\n\n", 0);
379 
380 	seq_puts(m, "oldest bitmap IO\n");
381 	seq_print_resource_pending_bitmap_io(m, resource, jif);
382 	seq_putc(m, '\n');
383 
384 	seq_puts(m, "meta data IO\n");
385 	seq_print_resource_pending_meta_io(m, resource, jif);
386 	seq_putc(m, '\n');
387 
388 	seq_puts(m, "socket buffer stats\n");
389 	/* for each connection ... once we have more than one */
390 	rcu_read_lock();
391 	if (connection->data.socket) {
392 		/* open coded SIOCINQ, the "relevant" part */
393 		struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
394 		int answ = tp->rcv_nxt - tp->copied_seq;
395 		seq_printf(m, "unread receive buffer: %u Byte\n", answ);
396 		/* open coded SIOCOUTQ, the "relevant" part */
397 		answ = tp->write_seq - tp->snd_una;
398 		seq_printf(m, "unacked send buffer: %u Byte\n", answ);
399 	}
400 	rcu_read_unlock();
401 	seq_putc(m, '\n');
402 
403 	seq_puts(m, "oldest peer requests\n");
404 	seq_print_resource_pending_peer_requests(m, resource, jif);
405 	seq_putc(m, '\n');
406 
407 	seq_puts(m, "application requests waiting for activity log\n");
408 	seq_print_waiting_for_AL(m, resource, jif);
409 	seq_putc(m, '\n');
410 
411 	seq_puts(m, "oldest application requests\n");
412 	seq_print_resource_transfer_log_summary(m, resource, connection, jif);
413 	seq_putc(m, '\n');
414 
415 	jif = jiffies - jif;
416 	if (jif)
417 		seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
418 	kref_put(&connection->kref, drbd_destroy_connection);
419 	return 0;
420 }
421 
/* make sure at *open* time that the respective object won't go away. */
/* Variant of single_open() that first takes a reference on @kref, but
 * only if the debugfs dentry is still positive (i.e. debugfs_remove()
 * has not run yet).  On success the caller's release() must drop the
 * reference via @release; on failure nothing is held. */
static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
		                void *data, struct kref *kref,
				void (*release)(struct kref *))
{
	struct dentry *parent;
	int ret = -ESTALE;

	/* Are we still linked,
	 * or has debugfs_remove() already been called? */
	parent = file->f_path.dentry->d_parent;
	/* not sure if this can happen: */
	if (!parent || d_really_is_negative(parent))
		goto out;
	/* serialize with d_delete() */
	mutex_lock(&d_inode(parent)->i_mutex);
	/* Make sure the object is still alive */
	if (simple_positive(file->f_path.dentry)
	&& kref_get_unless_zero(kref))
		ret = 0;
	mutex_unlock(&d_inode(parent)->i_mutex);
	if (!ret) {
		ret = single_open(file, show, data);
		/* single_open() failed: drop the reference we just took */
		if (ret)
			kref_put(kref, release);
	}
out:
	return ret;
}
451 
/* open: pin the resource for the lifetime of the open file */
static int in_flight_summary_open(struct inode *inode, struct file *file)
{
	struct drbd_resource *resource = inode->i_private;
	return drbd_single_open(file, in_flight_summary_show, resource,
				&resource->kref, drbd_destroy_resource);
}

/* release: drop the reference taken in open, then normal seq cleanup */
static int in_flight_summary_release(struct inode *inode, struct file *file)
{
	struct drbd_resource *resource = inode->i_private;
	kref_put(&resource->kref, drbd_destroy_resource);
	return single_release(inode, file);
}

static const struct file_operations in_flight_summary_fops = {
	.owner		= THIS_MODULE,
	.open		= in_flight_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= in_flight_summary_release,
};
473 
/* Create the per-resource debugfs hierarchy:
 *   resources/<name>/{volumes/, connections/, in_flight_summary}.
 * On any failure, tear down whatever was created so far. */
void drbd_debugfs_resource_add(struct drbd_resource *resource)
{
	struct dentry *dentry;
	/* debugfs not initialized (or init failed): silently do nothing */
	if (!drbd_debugfs_resources)
		return;

	dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res = dentry;

	dentry = debugfs_create_dir("volumes", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_volumes = dentry;

	dentry = debugfs_create_dir("connections", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_connections = dentry;

	dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP,
			resource->debugfs_res, resource,
			&in_flight_summary_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_in_flight_summary = dentry;
	return;

fail:
	drbd_debugfs_resource_cleanup(resource);
	drbd_err(resource, "failed to create debugfs dentry\n");
}
507 
508 static void drbd_debugfs_remove(struct dentry **dp)
509 {
510 	debugfs_remove(*dp);
511 	*dp = NULL;
512 }
513 
/* Tear down the per-resource hierarchy, children before parent.
 * Safe to call on a partially created hierarchy (see resource_add). */
void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
{
	/* it is ok to call debugfs_remove(NULL) */
	drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
	drbd_debugfs_remove(&resource->debugfs_res_connections);
	drbd_debugfs_remove(&resource->debugfs_res_volumes);
	drbd_debugfs_remove(&resource->debugfs_res);
}
522 
/* Print one timing-detail slot: callback number, age, call site, and
 * callback address.  Empty slots (no cb_addr yet) are skipped. */
static void seq_print_one_timing_detail(struct seq_file *m,
	const struct drbd_thread_timing_details *tdp,
	unsigned long now)
{
	struct drbd_thread_timing_details td;
	/* No locking...
	 * use temporary assignment to get at consistent data.
	 * Retry the copy until cb_nr did not change under us. */
	do {
		td = *tdp;
	} while (td.cb_nr != tdp->cb_nr);
	if (!td.cb_addr)
		return;
	seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
			td.cb_nr,
			jiffies_to_msecs(now - td.start_jif),
			td.caller_fn, td.line,
			td.cb_addr);
}
541 
542 static void seq_print_timing_details(struct seq_file *m,
543 		const char *title,
544 		unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now)
545 {
546 	unsigned int start_idx;
547 	unsigned int i;
548 
549 	seq_printf(m, "%s\n", title);
550 	/* If not much is going on, this will result in natural ordering.
551 	 * If it is very busy, we will possibly skip events, or even see wrap
552 	 * arounds, which could only be avoided with locking.
553 	 */
554 	start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST;
555 	for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
556 		seq_print_one_timing_detail(m, tdp+i, now);
557 	for (i = 0; i < start_idx; i++)
558 		seq_print_one_timing_detail(m, tdp+i, now);
559 }
560 
/* seq_file show() for "callback_history": recent worker and receiver
 * thread callbacks of one connection, with their ages. */
static int callback_history_show(struct seq_file *m, void *ignored)
{
	struct drbd_connection *connection = m->private;
	unsigned long jif = jiffies;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	seq_puts(m, "n\tage\tcallsite\tfn\n");
	seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif);
	seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif);
	return 0;
}
574 
/* open: pin the connection for the lifetime of the open file */
static int callback_history_open(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	return drbd_single_open(file, callback_history_show, connection,
				&connection->kref, drbd_destroy_connection);
}

/* release: drop the reference taken in open, then normal seq cleanup */
static int callback_history_release(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	kref_put(&connection->kref, drbd_destroy_connection);
	return single_release(inode, file);
}

static const struct file_operations connection_callback_history_fops = {
	.owner		= THIS_MODULE,
	.open		= callback_history_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= callback_history_release,
};
596 
/* seq_file show() for the connection's "oldest_requests": up to three
 * distinct requests — next to be sent, oldest waiting for ack, oldest
 * not yet "net done" — deduplicated if they coincide. */
static int connection_oldest_requests_show(struct seq_file *m, void *ignored)
{
	struct drbd_connection *connection = m->private;
	unsigned long now = jiffies;
	struct drbd_request *r1, *r2;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	spin_lock_irq(&connection->resource->req_lock);
	r1 = connection->req_next;
	if (r1)
		seq_print_minor_vnr_req(m, r1, now);
	r2 = connection->req_ack_pending;
	/* only print if different from the one above */
	if (r2 && r2 != r1) {
		r1 = r2;
		seq_print_minor_vnr_req(m, r1, now);
	}
	r2 = connection->req_not_net_done;
	if (r2 && r2 != r1)
		seq_print_minor_vnr_req(m, r2, now);
	spin_unlock_irq(&connection->resource->req_lock);
	return 0;
}
621 
/* open: pin the connection for the lifetime of the open file */
static int connection_oldest_requests_open(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	return drbd_single_open(file, connection_oldest_requests_show, connection,
				&connection->kref, drbd_destroy_connection);
}

/* release: drop the reference taken in open, then normal seq cleanup */
static int connection_oldest_requests_release(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	kref_put(&connection->kref, drbd_destroy_connection);
	return single_release(inode, file);
}

static const struct file_operations connection_oldest_requests_fops = {
	.owner		= THIS_MODULE,
	.open		= connection_oldest_requests_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= connection_oldest_requests_release,
};
643 
/* Create connections/peer/{callback_history,oldest_requests} for one
 * connection.  On any failure, tear down whatever was created. */
void drbd_debugfs_connection_add(struct drbd_connection *connection)
{
	struct dentry *conns_dir = connection->resource->debugfs_res_connections;
	struct dentry *dentry;
	/* resource-level debugfs missing: silently do nothing */
	if (!conns_dir)
		return;

	/* Once we enable multiple peers,
	 * these connections will have descriptive names.
	 * For now, it is just the one connection to the (only) "peer". */
	dentry = debugfs_create_dir("peer", conns_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn = dentry;

	dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP,
			connection->debugfs_conn, connection,
			&connection_callback_history_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn_callback_history = dentry;

	dentry = debugfs_create_file("oldest_requests", S_IRUSR|S_IRGRP,
			connection->debugfs_conn, connection,
			&connection_oldest_requests_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn_oldest_requests = dentry;
	return;

fail:
	drbd_debugfs_connection_cleanup(connection);
	drbd_err(connection, "failed to create debugfs dentry\n");
}
678 
/* Tear down the per-connection entries, children before parent dir.
 * Safe on a partially created hierarchy. */
void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
{
	drbd_debugfs_remove(&connection->debugfs_conn_callback_history);
	drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests);
	drbd_debugfs_remove(&connection->debugfs_conn);
}
685 
686 static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
687 {
688        struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);
689 
690        seq_printf(m, "%5d %s %s %s", bme->rs_left,
691 		  test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
692 		  test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
693 		  test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"
694 		  );
695 }
696 
/* seq_file show() for "resync_extents": lru_cache statistics and a
 * per-extent dump of the resync extent cache, if a local disk is
 * attached (state >= D_FAILED). */
static int device_resync_extents_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	if (get_ldev_if_state(device, D_FAILED)) {
		lc_seq_printf_stats(m, device->resync);
		lc_seq_dump_details(m, device->resync, "rs_left flags", resync_dump_detail);
		put_ldev(device);
	}
	return 0;
}
711 
/* seq_file show() for "act_log_extents": lru_cache statistics and a
 * plain dump of the activity log extent cache (no detail callback). */
static int device_act_log_extents_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	if (get_ldev_if_state(device, D_FAILED)) {
		lc_seq_printf_stats(m, device->act_log);
		lc_seq_dump_details(m, device->act_log, "", NULL);
		put_ldev(device);
	}
	return 0;
}
726 
/* seq_file show() for the device's "oldest_requests": per direction
 * (WRITE first, then READ) the oldest request still waiting for master
 * bio completion and the oldest still waiting for local completion,
 * deduplicated if they are the same request. */
static int device_oldest_requests_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;
	struct drbd_resource *resource = device->resource;
	unsigned long now = jiffies;
	struct drbd_request *r1, *r2;
	int i;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	seq_puts(m, RQ_HDR);
	spin_lock_irq(&resource->req_lock);
	/* WRITE, then READ */
	for (i = 1; i >= 0; --i) {
		r1 = list_first_entry_or_null(&device->pending_master_completion[i],
			struct drbd_request, req_pending_master_completion);
		r2 = list_first_entry_or_null(&device->pending_completion[i],
			struct drbd_request, req_pending_local);
		if (r1)
			seq_print_one_request(m, r1, now);
		if (r2 && r2 != r1)
			seq_print_one_request(m, r2, now);
	}
	spin_unlock_irq(&resource->req_lock);
	return 0;
}
754 
/* seq_file show() for "data_gen_id": the data generation UUIDs
 * (current, bitmap, history) read under uuid_lock.  Returns -ENODEV
 * when no local disk is attached. */
static int device_data_gen_id_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;
	struct drbd_md *md;
	enum drbd_uuid_index idx;

	if (!get_ldev_if_state(device, D_FAILED))
		return -ENODEV;

	md = &device->ldev->md;
	spin_lock_irq(&md->uuid_lock);
	for (idx = UI_CURRENT; idx <= UI_HISTORY_END; idx++) {
		seq_printf(m, "0x%016llX\n", md->uuid[idx]);
	}
	spin_unlock_irq(&md->uuid_lock);
	put_ldev(device);
	return 0;
}
773 
/* Generate open/release wrappers and file_operations for a per-device
 * debugfs attribute "name".  open pins the device via drbd_single_open();
 * release drops that reference.  Requires device_<name>_show() to exist. */
#define drbd_debugfs_device_attr(name)						\
static int device_ ## name ## _open(struct inode *inode, struct file *file)	\
{										\
	struct drbd_device *device = inode->i_private;				\
	return drbd_single_open(file, device_ ## name ## _show, device,		\
				&device->kref, drbd_destroy_device);		\
}										\
static int device_ ## name ## _release(struct inode *inode, struct file *file)	\
{										\
	struct drbd_device *device = inode->i_private;				\
	kref_put(&device->kref, drbd_destroy_device);				\
	return single_release(inode, file);					\
}										\
static const struct file_operations device_ ## name ## _fops = {		\
	.owner		= THIS_MODULE,						\
	.open		= device_ ## name ## _open,				\
	.read		= seq_read,						\
	.llseek		= seq_lseek,						\
	.release	= device_ ## name ## _release,				\
};

/* instantiate fops for each per-device attribute */
drbd_debugfs_device_attr(oldest_requests)
drbd_debugfs_device_attr(act_log_extents)
drbd_debugfs_device_attr(resync_extents)
drbd_debugfs_device_attr(data_gen_id)
799 
/* Create the per-device debugfs entries:
 *   resources/<res>/volumes/<vnr>/{oldest_requests,...}
 * plus a minors/<minor> symlink pointing at that volume directory.
 * On any failure, tear down whatever was created so far. */
void drbd_debugfs_device_add(struct drbd_device *device)
{
	struct dentry *vols_dir = device->resource->debugfs_res_volumes;
	char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */
	char vnr_buf[8];   /* volume number vnr is even 16 bit only; */
	char *slink_name = NULL;

	struct dentry *dentry;
	/* parent directories missing: silently do nothing */
	if (!vols_dir || !drbd_debugfs_minors)
		return;

	snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
	dentry = debugfs_create_dir(vnr_buf, vols_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	device->debugfs_vol = dentry;

	snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
	slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u",
			device->resource->name, device->vnr);
	if (!slink_name)
		goto fail;
	dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
	kfree(slink_name);
	slink_name = NULL;
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	device->debugfs_minor = dentry;

/* create one device attribute file; bail out to fail on error */
#define DCF(name)	do {					\
	dentry = debugfs_create_file(#name, S_IRUSR|S_IRGRP,	\
			device->debugfs_vol, device,		\
			&device_ ## name ## _fops);		\
	if (IS_ERR_OR_NULL(dentry))				\
		goto fail;					\
	device->debugfs_vol_ ## name = dentry;			\
	} while (0)

	DCF(oldest_requests);
	DCF(act_log_extents);
	DCF(resync_extents);
	DCF(data_gen_id);
#undef DCF
	return;

fail:
	drbd_debugfs_device_cleanup(device);
	drbd_err(device, "failed to create debugfs entries\n");
}
849 
/* Tear down the per-device entries: the minors symlink, then the
 * attribute files, then the volume directory itself.
 * Safe on a partially created hierarchy. */
void drbd_debugfs_device_cleanup(struct drbd_device *device)
{
	drbd_debugfs_remove(&device->debugfs_minor);
	drbd_debugfs_remove(&device->debugfs_vol_oldest_requests);
	drbd_debugfs_remove(&device->debugfs_vol_act_log_extents);
	drbd_debugfs_remove(&device->debugfs_vol_resync_extents);
	drbd_debugfs_remove(&device->debugfs_vol_data_gen_id);
	drbd_debugfs_remove(&device->debugfs_vol);
}
859 
/* Create the per-peer-device directory connections/peer/<vnr>/.
 * Currently the directory itself is the only entry. */
void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
{
	struct dentry *conn_dir = peer_device->connection->debugfs_conn;
	struct dentry *dentry;
	char vnr_buf[8];

	/* connection-level debugfs missing: silently do nothing */
	if (!conn_dir)
		return;

	snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
	dentry = debugfs_create_dir(vnr_buf, conn_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	peer_device->debugfs_peer_dev = dentry;
	return;

fail:
	drbd_debugfs_peer_device_cleanup(peer_device);
	drbd_err(peer_device, "failed to create debugfs entries\n");
}
880 
/* Remove the per-peer-device directory (no-op if never created). */
void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
{
	drbd_debugfs_remove(&peer_device->debugfs_peer_dev);
}
885 
/* seq_file show() for the global "version" file: build tag plus the
 * module/API/protocol version constants, one KEY=value per line. */
static int drbd_version_show(struct seq_file *m, void *ignored)
{
	seq_printf(m, "# %s\n", drbd_buildtag());
	seq_printf(m, "VERSION=%s\n", REL_VERSION);
	seq_printf(m, "API_VERSION=%u\n", API_VERSION);
	seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN);
	seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX);
	return 0;
}
895 
/* No object lifetime to manage here, so plain single_open() suffices. */
static int drbd_version_open(struct inode *inode, struct file *file)
{
	return single_open(file, drbd_version_show, NULL);
}
900 
901 static struct file_operations drbd_version_fops = {
902 	.owner = THIS_MODULE,
903 	.open = drbd_version_open,
904 	.llseek = seq_lseek,
905 	.read = seq_read,
906 	.release = single_release,
907 };
908 
/* not __exit, may be indirectly called
 * from the module-load-failure path as well. */
/* Remove the global debugfs hierarchy, children before the root dir.
 * Safe on a partially created hierarchy (entries may still be NULL). */
void drbd_debugfs_cleanup(void)
{
	drbd_debugfs_remove(&drbd_debugfs_resources);
	drbd_debugfs_remove(&drbd_debugfs_minors);
	drbd_debugfs_remove(&drbd_debugfs_version);
	drbd_debugfs_remove(&drbd_debugfs_root);
}
918 
/* Create the global debugfs hierarchy: drbd/{version,resources/,minors/}.
 * On failure, everything created so far is removed; the return value is
 * the error encoded in the failing dentry, or -EINVAL if the call
 * returned NULL instead of an ERR_PTR. */
int __init drbd_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_dir("drbd", NULL);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_root = dentry;

	dentry = debugfs_create_file("version", 0444, drbd_debugfs_root, NULL, &drbd_version_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_version = dentry;

	dentry = debugfs_create_dir("resources", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_resources = dentry;

	dentry = debugfs_create_dir("minors", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_minors = dentry;
	return 0;

fail:
	drbd_debugfs_cleanup();
	/* dentry still holds the last (failing) result here */
	if (dentry)
		return PTR_ERR(dentry);
	else
		return -EINVAL;
}
951