xref: /linux/drivers/s390/cio/cmf.c (revision 858259cf7d1c443c836a2022b78cb281f0a9b95e)
1 /*
2  * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $)
3  *
4  * Linux on zSeries Channel Measurement Facility support
5  *
6  * Copyright 2000,2003 IBM Corporation
7  *
8  * Author: Arnd Bergmann <arndb@de.ibm.com>
9  *
10  * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; if not, write to the Free Software
24  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25  */
26 
27 #include <linux/bootmem.h>
28 #include <linux/device.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
31 #include <linux/module.h>
32 #include <linux/moduleparam.h>
33 #include <linux/slab.h>
34 #include <linux/timex.h>	/* get_clock() */
35 
36 #include <asm/ccwdev.h>
37 #include <asm/cio.h>
38 #include <asm/cmb.h>
39 #include <asm/div64.h>
40 
41 #include "cio.h"
42 #include "css.h"
43 #include "device.h"
44 #include "ioasm.h"
45 #include "chsc.h"
46 
47 /* parameter to enable cmf during boot, possible uses are:
48  *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
49  *               used on any subchannel
50  *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
51  *                     <num> subchannel, where <num> is an integer
52  *                     between 1 and 65535, default is 1024
53  */
54 #define ARGSTRING "s390cmf"
55 
/* indices for READCMB; used to select one value out of a measurement
 * block in read_cmb()/read_cmbe() */
enum cmb_index {
 /* basic and extended format: */
	cmb_ssch_rsch_count,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
 /* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
70 
/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC:      traditional channel measurement blocks supported
 * 		    by all machines that we run on
 * @CMF_EXTENDED:   improved format that was introduced with the z990
 * 		    machine
 * @CMF_AUTODETECT: default: use extended format when running on a z990
 *                  or later machine, otherwise fall back to basic format
 *
 * CMF_AUTODETECT is resolved to one of the two real formats in
 * init_cmf().
 **/
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,
};
86 /**
87  * format - actual format for all measurement blocks
88  *
89  * The format module parameter can be set to a value of 0 (zero)
90  * or 1, indicating basic or extended format as described for
91  * enum cmb_format.
92  */
93 static int format = CMF_AUTODETECT;
94 module_param(format, bool, 0444);
95 
/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * All these functions operate on a struct ccw_device. There is only
 * one instance of struct cmb_operations because the measurement block
 * format is the same for all devices in the system.
 *
 * @alloc:	allocate memory for a channel measurement block,
 *		either with the help of a special pool or with kmalloc
 * @free:	free memory allocated with @alloc
 * @set:	enable or disable measurement
 * @read:	read one value from the measurement block, selected
 *		by enum cmb_index
 * @readall:	read a measurement block in a common format
 * @reset:	clear the data in the associated measurement block and
 *		reset its time stamp
 * @attr_group:	sysfs attributes exported for this format
 */
struct cmb_operations {
	int (*alloc)  (struct ccw_device*);
	void(*free)   (struct ccw_device*);
	int (*set)    (struct ccw_device*, u32);
	u64 (*read)   (struct ccw_device*, int);
	int (*readall)(struct ccw_device*, struct cmbdata *);
	void (*reset) (struct ccw_device*);

	struct attribute_group *attr_group;
};

/* points at cmbops_basic or cmbops_extended, chosen in init_cmf() */
static struct cmb_operations *cmbops;
122 
123 /* our user interface is designed in terms of nanoseconds,
124  * while the hardware measures total times in its own
125  * unit.*/
126 static inline u64 time_to_nsec(u32 value)
127 {
128 	return ((u64)value) * 128000ull;
129 }
130 
/*
 * Users are usually interested in average times,
 * not accumulated time.
 * This also helps us with atomicity problems
 * when reading single values.
 */
static inline u64 time_to_avg_nsec(u32 value, u32 count)
{
	u64 ret;

	/* no samples yet, avoid division by 0 */
	if (count == 0)
		return 0;

	/* value comes in units of 128 microseconds */
	ret = time_to_nsec(value);
	do_div(ret, count);

	return ret;
}
151 
/* activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled. */
static inline void
cmf_activate(void *area, unsigned int onoff)
{
	/* the schm instruction takes its operands in fixed registers:
	 * gpr2 holds the measurement area origin, gpr1 the command */
	register void * __gpr2 asm("2");
	register long __gpr1 asm("1");

	__gpr2 = area;
	__gpr1 = onoff ? 2 : 0;	/* 2 = activate, 0 = deactivate */
	/* activate channel measurement */
	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
167 
/*
 * set_schib - write measurement mode/format/address into the schib
 * @cdev:	device to modify
 * @mme:	measurement mode enable bit
 * @mbfc:	measurement block format control bit
 * @address:	block address (mbfc set) or block index (mbfc clear)
 *
 * Must be called with the subchannel idle; returns 0 on success,
 * -EBUSY if the subchannel has I/O or status pending, -ENODEV if it
 * vanished, -EINVAL on any other msch failure.
 */
static int
set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
{
	int ret;
	int retry;
	struct subchannel *sch;
	struct schib *schib;

	sch = to_subchannel(cdev->dev.parent);
	schib = &sch->schib;
	/* msch can silently fail, so do it again if necessary */
	for (retry = 0; retry < 3; retry++) {
		/* prepare schib */
		stsch(sch->irq, schib);
		schib->pmcw.mme  = mme;
		schib->pmcw.mbfc = mbfc;
		/* address can be either a block address or a block index */
		if (mbfc)
			schib->mba = address;
		else
			schib->pmcw.mbi = address;

		/* try to submit it */
		switch(ret = msch_err(sch->irq, schib)) {
			case 0:
				break;
			case 1:
			case 2: /* in I/O or status pending */
				ret = -EBUSY;
				break;
			case 3: /* subchannel is no longer valid */
				ret = -ENODEV;
				break;
			default: /* msch caught an exception */
				ret = -EINVAL;
				break;
		}
		stsch(sch->irq, schib); /* restore the schib */

		if (ret)
			break;

		/* check if it worked */
		if (schib->pmcw.mme  == mme &&
		    schib->pmcw.mbfc == mbfc &&
		    (mbfc ? (schib->mba == address)
			  : (schib->pmcw.mbi == address)))
			return 0;

		ret = -EINVAL;
	}

	return ret;
}
222 
/* context for a set_schib() call that had to be deferred because the
 * subchannel was busy; lives on the stack of set_schib_wait() and is
 * completed by retry_set_schib() */
struct set_schib_struct {
	u32 mme;		/* parameters of the deferred set_schib() call */
	int mbfc;
	unsigned long address;
	wait_queue_head_t wait;	/* woken by retry_set_schib() */
	int ret;		/* 1 while pending, result code afterwards */
};
230 
231 static int set_schib_wait(struct ccw_device *cdev, u32 mme,
232 				int mbfc, unsigned long address)
233 {
234 	struct set_schib_struct s = {
235 		.mme = mme,
236 		.mbfc = mbfc,
237 		.address = address,
238 		.wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait),
239 	};
240 
241 	spin_lock_irq(cdev->ccwlock);
242 	s.ret = set_schib(cdev, mme, mbfc, address);
243 	if (s.ret != -EBUSY) {
244 		goto out_nowait;
245 	}
246 
247 	if (cdev->private->state != DEV_STATE_ONLINE) {
248 		s.ret = -EBUSY;
249 		/* if the device is not online, don't even try again */
250 		goto out_nowait;
251 	}
252 	cdev->private->state = DEV_STATE_CMFCHANGE;
253 	cdev->private->cmb_wait = &s;
254 	s.ret = 1;
255 
256 	spin_unlock_irq(cdev->ccwlock);
257 	if (wait_event_interruptible(s.wait, s.ret != 1)) {
258 		spin_lock_irq(cdev->ccwlock);
259 		if (s.ret == 1) {
260 			s.ret = -ERESTARTSYS;
261 			cdev->private->cmb_wait = 0;
262 			if (cdev->private->state == DEV_STATE_CMFCHANGE)
263 				cdev->private->state = DEV_STATE_ONLINE;
264 		}
265 		spin_unlock_irq(cdev->ccwlock);
266 	}
267 	return s.ret;
268 
269 out_nowait:
270 	spin_unlock_irq(cdev->ccwlock);
271 	return s.ret;
272 }
273 
274 void retry_set_schib(struct ccw_device *cdev)
275 {
276 	struct set_schib_struct *s;
277 
278 	s = cdev->private->cmb_wait;
279 	cdev->private->cmb_wait = 0;
280 	if (!s) {
281 		WARN_ON(1);
282 		return;
283 	}
284 	s->ret = set_schib(cdev, s->mme, s->mbfc, s->address);
285 	wake_up(&s->wait);
286 }
287 
/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:	pointer to CMBs (only in basic measurement mode)
 * @list:	contains a linked list of all subchannels
 * @num_channels: number of measurement blocks that fit into @mem
 * @lock:	protect concurrent access to @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

/* single global instance; @num_channels can be overridden with the
 * "maxchannels" module parameter below */
static struct cmb_area cmb_area = {
	.lock = SPIN_LOCK_UNLOCKED,
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels  = 1024,
};
307 
308 
309 /* ****** old style CMB handling ********/
310 
/** int maxchannels
 *
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
 * Current kernels don't allow kmalloc with more than 128kb, so the
 * maximum is 4096.
 */

module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
323 
/**
 * struct cmb - basic channel measurement block
 *
 * cmb as used by the hardware. The fields are described in
 * z/Architecture Principles of Operation, chapter 17.
 * The area is a contiguous array and may not be reallocated or freed.
 * Only one cmb area can be present in the system.
 */
struct cmb {
	u16 ssch_rsch_count;
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];
};
342 
/* insert a single device into the cmb_area list
 * called with cmb_area.lock held from alloc_cmb
 */
static inline int
alloc_cmb_single (struct ccw_device *cdev)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		/* the device is already on the list, hence measured */
		ret = -EBUSY;
		goto out;
	}

	/* find first unused cmb in cmb_area.mem.
	 * this is a little tricky: cmb_area.list
	 * remains sorted by ->cmb pointers */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		if ((struct cmb*)node->cmb > cmb)
			break;
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		/* all slots in the contiguous array are taken */
		ret = -ENOMEM;
		goto out;
	}

	/* insert new cmb: either before @node (loop broke out early) or,
	 * when the loop ran through, &node->cmb_list is the list head
	 * itself and list_add_tail() appends at the end */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cdev->private->cmb = cmb;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
381 
/*
 * alloc_cmb - basic-format @alloc operation
 *
 * Lazily allocates the single contiguous cmb array on first use,
 * activates the channel monitor, then assigns one slot to @cdev.
 */
static int
alloc_cmb (struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;

	spin_lock(&cmb_area.lock);

	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		/* drop the lock while the allocator may sleep; another
		 * thread can win the race, so recheck afterwards */
		spin_unlock(&cmb_area.lock);
		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
				 get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			/* no luck */
			ret = -ENOMEM;
			goto out;
		} else {
			/* everything ok */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			/* the first user activates the channel monitor */
			cmf_activate(cmb_area.mem, 1);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev);
out:
	spin_unlock(&cmb_area.lock);

	return ret;
}
423 
/*
 * free_cmb - basic-format @free operation
 *
 * Removes @cdev from the measurement list; the last user deactivates
 * the channel monitor and releases the contiguous cmb array.
 * Lock order: cmb_area.lock outside, ccwlock inside (same as the
 * allocation path).
 */
static void
free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;

	priv = cdev->private;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	if (list_empty(&priv->cmb_list)) {
		/* already freed */
		goto out;
	}

	priv->cmb = NULL;
	list_del_init(&priv->cmb_list);

	if (list_empty(&cmb_area.list)) {
		/* last device gone: stop measuring and drop the area */
		ssize_t size;
		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, 0);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
out:
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
453 
454 static int
455 set_cmb(struct ccw_device *cdev, u32 mme)
456 {
457 	u16 offset;
458 
459 	if (!cdev->private->cmb)
460 		return -EINVAL;
461 
462 	offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0;
463 
464 	return set_schib_wait(cdev, mme, 0, offset);
465 }
466 
467 static u64
468 read_cmb (struct ccw_device *cdev, int index)
469 {
470 	/* yes, we have to put it on the stack
471 	 * because the cmb must only be accessed
472 	 * atomically, e.g. with mvc */
473 	struct cmb cmb;
474 	unsigned long flags;
475 	u32 val;
476 
477 	spin_lock_irqsave(cdev->ccwlock, flags);
478 	if (!cdev->private->cmb) {
479 		spin_unlock_irqrestore(cdev->ccwlock, flags);
480 		return 0;
481 	}
482 
483 	cmb = *(struct cmb*)cdev->private->cmb;
484 	spin_unlock_irqrestore(cdev->ccwlock, flags);
485 
486 	switch (index) {
487 	case cmb_ssch_rsch_count:
488 		return cmb.ssch_rsch_count;
489 	case cmb_sample_count:
490 		return cmb.sample_count;
491 	case cmb_device_connect_time:
492 		val = cmb.device_connect_time;
493 		break;
494 	case cmb_function_pending_time:
495 		val = cmb.function_pending_time;
496 		break;
497 	case cmb_device_disconnect_time:
498 		val = cmb.device_disconnect_time;
499 		break;
500 	case cmb_control_unit_queuing_time:
501 		val = cmb.control_unit_queuing_time;
502 		break;
503 	case cmb_device_active_only_time:
504 		val = cmb.device_active_only_time;
505 		break;
506 	default:
507 		return 0;
508 	}
509 	return time_to_avg_nsec(val, cmb.sample_count);
510 }
511 
512 static int
513 readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
514 {
515 	/* yes, we have to put it on the stack
516 	 * because the cmb must only be accessed
517 	 * atomically, e.g. with mvc */
518 	struct cmb cmb;
519 	unsigned long flags;
520 	u64 time;
521 
522 	spin_lock_irqsave(cdev->ccwlock, flags);
523 	if (!cdev->private->cmb) {
524 		spin_unlock_irqrestore(cdev->ccwlock, flags);
525 		return -ENODEV;
526 	}
527 
528 	cmb = *(struct cmb*)cdev->private->cmb;
529 	time = get_clock() - cdev->private->cmb_start_time;
530 	spin_unlock_irqrestore(cdev->ccwlock, flags);
531 
532 	memset(data, 0, sizeof(struct cmbdata));
533 
534 	/* we only know values before device_busy_time */
535 	data->size = offsetof(struct cmbdata, device_busy_time);
536 
537 	/* convert to nanoseconds */
538 	data->elapsed_time = (time * 1000) >> 12;
539 
540 	/* copy data to new structure */
541 	data->ssch_rsch_count = cmb.ssch_rsch_count;
542 	data->sample_count = cmb.sample_count;
543 
544 	/* time fields are converted to nanoseconds while copying */
545 	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
546 	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
547 	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
548 	data->control_unit_queuing_time
549 		= time_to_nsec(cmb.control_unit_queuing_time);
550 	data->device_active_only_time
551 		= time_to_nsec(cmb.device_active_only_time);
552 
553 	return 0;
554 }
555 
556 static void
557 reset_cmb(struct ccw_device *cdev)
558 {
559 	struct cmb *cmb;
560 	spin_lock_irq(cdev->ccwlock);
561 	cmb = cdev->private->cmb;
562 	if (cmb)
563 		memset (cmb, 0, sizeof (*cmb));
564 	cdev->private->cmb_start_time = get_clock();
565 	spin_unlock_irq(cdev->ccwlock);
566 }
567 
/* defined below, together with the basic attribute list */
static struct attribute_group cmf_attr_group;

/* operation table for the traditional one-array measurement format */
static struct cmb_operations cmbops_basic = {
	.alloc	= alloc_cmb,
	.free	= free_cmb,
	.set	= set_cmb,
	.read	= read_cmb,
	.readall    = readall_cmb,
	.reset	    = reset_cmb,
	.attr_group = &cmf_attr_group,
};
579 
580 /* ******** extended cmb handling ********/
581 
/**
 * struct cmbe - extended channel measurement block
 *
 * cmb as used by the hardware, may be in any 64 bit physical location,
 * the fields are described in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 */
struct cmbe {
	u32 ssch_rsch_count;
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];	/* pads the block to 64 bytes */
};
601 
/* kmalloc only guarantees 8 byte alignment, but we need cmbe
 * pointers to be naturally aligned. Make sure to allocate
 * enough space for two cmbes */
static inline struct cmbe* cmbe_align(struct cmbe *c)
{
	unsigned long addr;
	/* Round the pointer up into the doubled allocation, then clear
	 * the address bits between sizeof(long) and sizeof(struct cmbe).
	 * Assuming kmalloc's 8-byte alignment (low three bits already
	 * zero), this yields the first naturally aligned cmbe inside
	 * the buffer. */
	addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
				 ~(sizeof (struct cmbe) - sizeof(long));
	return (struct cmbe*)addr;
}
612 
/*
 * alloc_cmbe - extended-format @alloc operation
 *
 * Each device gets its own kmalloc'ed block; the first device also
 * activates the global channel monitor.
 */
static int
alloc_cmbe (struct ccw_device *cdev)
{
	struct cmbe *cmbe;
	/* allocate twice the size so cmbe_align() can pick a naturally
	 * aligned block inside the buffer */
	cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
	if (!cmbe)
		return -ENOMEM;

	spin_lock_irq(cdev->ccwlock);
	if (cdev->private->cmb) {
		/* this device already has a measurement block */
		kfree(cmbe);
		spin_unlock_irq(cdev->ccwlock);
		return -EBUSY;
	}

	cdev->private->cmb = cmbe;
	spin_unlock_irq(cdev->ccwlock);

	/* activate global measurement if this is the first channel */
	spin_lock(&cmb_area.lock);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, 1);
	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
	spin_unlock(&cmb_area.lock);

	return 0;
}
640 
641 static void
642 free_cmbe (struct ccw_device *cdev)
643 {
644 	spin_lock_irq(cdev->ccwlock);
645 	if (cdev->private->cmb)
646 		kfree(cdev->private->cmb);
647 	cdev->private->cmb = NULL;
648 	spin_unlock_irq(cdev->ccwlock);
649 
650 	/* deactivate global measurement if this is the last channel */
651 	spin_lock(&cmb_area.lock);
652 	list_del_init(&cdev->private->cmb_list);
653 	if (list_empty(&cmb_area.list))
654 		cmf_activate(NULL, 0);
655 	spin_unlock(&cmb_area.lock);
656 }
657 
658 static int
659 set_cmbe(struct ccw_device *cdev, u32 mme)
660 {
661 	unsigned long mba;
662 
663 	if (!cdev->private->cmb)
664 		return -EINVAL;
665 	mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0;
666 
667 	return set_schib_wait(cdev, mme, 1, mba);
668 }
669 
670 
671 u64
672 read_cmbe (struct ccw_device *cdev, int index)
673 {
674 	/* yes, we have to put it on the stack
675 	 * because the cmb must only be accessed
676 	 * atomically, e.g. with mvc */
677 	struct cmbe cmb;
678 	unsigned long flags;
679 	u32 val;
680 
681 	spin_lock_irqsave(cdev->ccwlock, flags);
682 	if (!cdev->private->cmb) {
683 		spin_unlock_irqrestore(cdev->ccwlock, flags);
684 		return 0;
685 	}
686 
687 	cmb = *cmbe_align(cdev->private->cmb);
688 	spin_unlock_irqrestore(cdev->ccwlock, flags);
689 
690 	switch (index) {
691 	case cmb_ssch_rsch_count:
692 		return cmb.ssch_rsch_count;
693 	case cmb_sample_count:
694 		return cmb.sample_count;
695 	case cmb_device_connect_time:
696 		val = cmb.device_connect_time;
697 		break;
698 	case cmb_function_pending_time:
699 		val = cmb.function_pending_time;
700 		break;
701 	case cmb_device_disconnect_time:
702 		val = cmb.device_disconnect_time;
703 		break;
704 	case cmb_control_unit_queuing_time:
705 		val = cmb.control_unit_queuing_time;
706 		break;
707 	case cmb_device_active_only_time:
708 		val = cmb.device_active_only_time;
709 		break;
710 	case cmb_device_busy_time:
711 		val = cmb.device_busy_time;
712 		break;
713 	case cmb_initial_command_response_time:
714 		val = cmb.initial_command_response_time;
715 		break;
716 	default:
717 		return 0;
718 	}
719 	return time_to_avg_nsec(val, cmb.sample_count);
720 }
721 
722 static int
723 readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
724 {
725 	/* yes, we have to put it on the stack
726 	 * because the cmb must only be accessed
727 	 * atomically, e.g. with mvc */
728 	struct cmbe cmb;
729 	unsigned long flags;
730 	u64 time;
731 
732 	spin_lock_irqsave(cdev->ccwlock, flags);
733 	if (!cdev->private->cmb) {
734 		spin_unlock_irqrestore(cdev->ccwlock, flags);
735 		return -ENODEV;
736 	}
737 
738 	cmb = *cmbe_align(cdev->private->cmb);
739 	time = get_clock() - cdev->private->cmb_start_time;
740 	spin_unlock_irqrestore(cdev->ccwlock, flags);
741 
742 	memset (data, 0, sizeof(struct cmbdata));
743 
744 	/* we only know values before device_busy_time */
745 	data->size = offsetof(struct cmbdata, device_busy_time);
746 
747 	/* conver to nanoseconds */
748 	data->elapsed_time = (time * 1000) >> 12;
749 
750 	/* copy data to new structure */
751 	data->ssch_rsch_count = cmb.ssch_rsch_count;
752 	data->sample_count = cmb.sample_count;
753 
754 	/* time fields are converted to nanoseconds while copying */
755 	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
756 	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
757 	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
758 	data->control_unit_queuing_time
759 		= time_to_nsec(cmb.control_unit_queuing_time);
760 	data->device_active_only_time
761 		= time_to_nsec(cmb.device_active_only_time);
762 	data->device_busy_time = time_to_nsec(cmb.device_busy_time);
763 	data->initial_command_response_time
764 		= time_to_nsec(cmb.initial_command_response_time);
765 
766 	return 0;
767 }
768 
/*
 * reset_cmbe - extended-format @reset operation
 *
 * Clears the hardware counters and restarts the elapsed-time base
 * used by readall_cmbe().
 */
static void
reset_cmbe(struct ccw_device *cdev)
{
	struct cmbe *cmb;
	spin_lock_irq(cdev->ccwlock);
	/* NOTE(review): cdev->private->cmb may be NULL here; the
	 * cmbe_align() arithmetic then also evaluates to NULL so the
	 * memset is skipped — confirm before changing the alignment
	 * computation. */
	cmb = cmbe_align(cdev->private->cmb);
	if (cmb)
		memset (cmb, 0, sizeof (*cmb));
	cdev->private->cmb_start_time = get_clock();
	spin_unlock_irq(cdev->ccwlock);
}
780 
/* defined below, together with the extended attribute list */
static struct attribute_group cmf_attr_group_ext;

/* operation table for the per-device extended format (z990 and later,
 * see enum cmb_format) */
static struct cmb_operations cmbops_extended = {
	.alloc	    = alloc_cmbe,
	.free	    = free_cmbe,
	.set	    = set_cmbe,
	.read	    = read_cmbe,
	.readall    = readall_cmbe,
	.reset	    = reset_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
792 
793 
794 static ssize_t
795 cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
796 {
797 	return sprintf(buf, "%lld\n",
798 		(unsigned long long) cmf_read(to_ccwdev(dev), idx));
799 }
800 
801 static ssize_t
802 cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, char *buf)
803 {
804 	struct ccw_device *cdev;
805 	long interval;
806 	unsigned long count;
807 
808 	cdev = to_ccwdev(dev);
809 	interval  = get_clock() - cdev->private->cmb_start_time;
810 	count = cmf_read(cdev, cmb_sample_count);
811 	if (count)
812 		interval /= count;
813 	else
814 		interval = -1;
815 	return sprintf(buf, "%ld\n", interval);
816 }
817 
818 static ssize_t
819 cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char *buf)
820 {
821 	struct cmbdata data;
822 	u64 utilization;
823 	unsigned long t, u;
824 	int ret;
825 
826 	ret = cmf_readall(to_ccwdev(dev), &data);
827 	if (ret)
828 		return ret;
829 
830 	utilization = data.device_connect_time +
831 		      data.function_pending_time +
832 		      data.device_disconnect_time;
833 
834 	/* shift to avoid long long division */
835 	while (-1ul < (data.elapsed_time | utilization)) {
836 		utilization >>= 8;
837 		data.elapsed_time >>= 8;
838 	}
839 
840 	/* calculate value in 0.1 percent units */
841 	t = (unsigned long) data.elapsed_time / 1000;
842 	u = (unsigned long) utilization / t;
843 
844 	return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
845 }
846 
/* generate a sysfs show function plus DEVICE_ATTR for a plain counter */
#define cmf_attr(name) \
static ssize_t show_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \
{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
static DEVICE_ATTR(name, 0444, show_ ## name, NULL);

/* same, but the attribute is called avg_<name>, since cmb_show_attr()
 * reports averaged values for the time indices */
#define cmf_attr_avg(name) \
static ssize_t show_avg_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \
{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL);

cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

/* attributes with dedicated show functions defined above */
static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
869 
870 static struct attribute *cmf_attributes[] = {
871 	&dev_attr_avg_sample_interval.attr,
872 	&dev_attr_avg_utilization.attr,
873 	&dev_attr_ssch_rsch_count.attr,
874 	&dev_attr_sample_count.attr,
875 	&dev_attr_avg_device_connect_time.attr,
876 	&dev_attr_avg_function_pending_time.attr,
877 	&dev_attr_avg_device_disconnect_time.attr,
878 	&dev_attr_avg_control_unit_queuing_time.attr,
879 	&dev_attr_avg_device_active_only_time.attr,
880 	0,
881 };
882 
883 static struct attribute_group cmf_attr_group = {
884 	.name  = "cmf",
885 	.attrs = cmf_attributes,
886 };
887 
888 static struct attribute *cmf_attributes_ext[] = {
889 	&dev_attr_avg_sample_interval.attr,
890 	&dev_attr_avg_utilization.attr,
891 	&dev_attr_ssch_rsch_count.attr,
892 	&dev_attr_sample_count.attr,
893 	&dev_attr_avg_device_connect_time.attr,
894 	&dev_attr_avg_function_pending_time.attr,
895 	&dev_attr_avg_device_disconnect_time.attr,
896 	&dev_attr_avg_control_unit_queuing_time.attr,
897 	&dev_attr_avg_device_active_only_time.attr,
898 	&dev_attr_avg_device_busy_time.attr,
899 	&dev_attr_avg_initial_command_response_time.attr,
900 	0,
901 };
902 
903 static struct attribute_group cmf_attr_group_ext = {
904 	.name  = "cmf",
905 	.attrs = cmf_attributes_ext,
906 };
907 
908 static ssize_t cmb_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
909 {
910 	return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
911 }
912 
913 static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t c)
914 {
915 	struct ccw_device *cdev;
916 	int ret;
917 
918 	cdev = to_ccwdev(dev);
919 
920 	switch (buf[0]) {
921 	case '0':
922 		ret = disable_cmf(cdev);
923 		if (ret)
924 			printk(KERN_INFO "disable_cmf failed (%d)\n", ret);
925 		break;
926 	case '1':
927 		ret = enable_cmf(cdev);
928 		if (ret && ret != -EBUSY)
929 			printk(KERN_INFO "enable_cmf failed (%d)\n", ret);
930 		break;
931 	}
932 
933 	return c;
934 }
935 
/* read-write "cmb_enable" attribute on the ccw device itself */
DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
937 
938 /* enable_cmf/disable_cmf: module interface for cmf (de)activation */
939 int
940 enable_cmf(struct ccw_device *cdev)
941 {
942 	int ret;
943 
944 	ret = cmbops->alloc(cdev);
945 	cmbops->reset(cdev);
946 	if (ret)
947 		return ret;
948 	ret = cmbops->set(cdev, 2);
949 	if (ret) {
950 		cmbops->free(cdev);
951 		return ret;
952 	}
953 	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
954 	if (!ret)
955 		return 0;
956 	cmbops->set(cdev, 0);  //FIXME: this can fail
957 	cmbops->free(cdev);
958 	return ret;
959 }
960 
961 int
962 disable_cmf(struct ccw_device *cdev)
963 {
964 	int ret;
965 
966 	ret = cmbops->set(cdev, 0);
967 	if (ret)
968 		return ret;
969 	cmbops->free(cdev);
970 	sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
971 	return ret;
972 }
973 
/* cmf_read - module interface: read a single value from the current
 * measurement block, dispatched to the active format */
u64
cmf_read(struct ccw_device *cdev, int index)
{
	return cmbops->read(cdev, index);
}
979 
/* cmf_readall - module interface: read the whole measurement block in
 * the common struct cmbdata format, dispatched to the active format */
int
cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
	return cmbops->readall(cdev, data);
}
985 
986 static int __init
987 init_cmf(void)
988 {
989 	char *format_string;
990 	char *detect_string = "parameter";
991 
992 	/* We cannot really autoprobe this. If the user did not give a parameter,
993 	   see if we are running on z990 or up, otherwise fall back to basic mode. */
994 
995 	if (format == CMF_AUTODETECT) {
996 		if (!css_characteristics_avail ||
997 		    !css_general_characteristics.ext_mb) {
998 			format = CMF_BASIC;
999 		} else {
1000 			format = CMF_EXTENDED;
1001 		}
1002 		detect_string = "autodetected";
1003 	} else {
1004 		detect_string = "parameter";
1005 	}
1006 
1007 	switch (format) {
1008 	case CMF_BASIC:
1009 		format_string = "basic";
1010 		cmbops = &cmbops_basic;
1011 		if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) {
1012 			printk(KERN_ERR "Basic channel measurement facility"
1013 					" can only use 1 to 4096 devices\n"
1014 			       KERN_ERR "when the cmf driver is built"
1015 					" as a loadable module\n");
1016 			return 1;
1017 		}
1018 		break;
1019 	case CMF_EXTENDED:
1020  		format_string = "extended";
1021 		cmbops = &cmbops_extended;
1022 		break;
1023 	default:
1024 		printk(KERN_ERR "Invalid format %d for channel "
1025 			"measurement facility\n", format);
1026 		return 1;
1027 	}
1028 
1029 	printk(KERN_INFO "Channel measurement facility using %s format (%s)\n",
1030 		format_string, detect_string);
1031 	return 0;
1032 }
1033 
1034 module_init(init_cmf);
1035 
1036 
1037 MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
1038 MODULE_LICENSE("GPL");
1039 MODULE_DESCRIPTION("channel measurement facility base driver\n"
1040 		   "Copyright 2003 IBM Corporation\n");
1041 
1042 EXPORT_SYMBOL_GPL(enable_cmf);
1043 EXPORT_SYMBOL_GPL(disable_cmf);
1044 EXPORT_SYMBOL_GPL(cmf_read);
1045 EXPORT_SYMBOL_GPL(cmf_readall);
1046