1 /*
2  * Linux on zSeries Channel Measurement Facility support
3  *
4  * Copyright IBM Corp. 2000, 2006
5  *
6  * Authors: Arnd Bergmann <arndb@de.ibm.com>
7  *	    Cornelia Huck <cornelia.huck@de.ibm.com>
8  *
9  * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24  */
25 
26 #define KMSG_COMPONENT "cio"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 
29 #include <linux/bootmem.h>
30 #include <linux/device.h>
31 #include <linux/init.h>
32 #include <linux/list.h>
33 #include <linux/module.h>
34 #include <linux/moduleparam.h>
35 #include <linux/slab.h>
36 #include <linux/timex.h>	/* get_tod_clock() */
37 
38 #include <asm/ccwdev.h>
39 #include <asm/cio.h>
40 #include <asm/cmb.h>
41 #include <asm/div64.h>
42 
43 #include "cio.h"
44 #include "css.h"
45 #include "device.h"
46 #include "ioasm.h"
47 #include "chsc.h"
48 
49 /*
50  * parameter to enable cmf during boot, possible uses are:
51  *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
52  *               used on any subchannel
53  *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
54  *                     <num> subchannel, where <num> is an integer
55  *                     between 1 and 65535, default is 1024
56  */
57 #define ARGSTRING "s390cmf"
58 
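/*
 * Illustrative only: appended to an otherwise unchanged kernel command
 * line, the parameter described above could be used like this:
 *
 *	... s390cmf
 *	... s390cmf=2048
 *
 * The first form reserves the full 2 MB so that any subchannel can be
 * measured; the second reserves only enough memory for 2048 subchannels.
 */
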
59 /* indices for READCMB */
60 enum cmb_index {
61  /* basic and extended format: */
62 	cmb_ssch_rsch_count,
63 	cmb_sample_count,
64 	cmb_device_connect_time,
65 	cmb_function_pending_time,
66 	cmb_device_disconnect_time,
67 	cmb_control_unit_queuing_time,
68 	cmb_device_active_only_time,
69  /* extended format only: */
70 	cmb_device_busy_time,
71 	cmb_initial_command_response_time,
72 };
73 
74 /**
75  * enum cmb_format - types of supported measurement block formats
76  *
77  * @CMF_BASIC:      traditional channel measurement blocks supported
78  *		    by all machines that we run on
79  * @CMF_EXTENDED:   improved format that was introduced with the z990
80  *		    machine
81  * @CMF_AUTODETECT: default: use extended format when running on a machine
82  *		    supporting extended format, otherwise fall back to
83  *		    basic format
84  */
85 enum cmb_format {
86 	CMF_BASIC,
87 	CMF_EXTENDED,
88 	CMF_AUTODETECT = -1,
89 };
90 
91 /*
92  * format - actual format for all measurement blocks
93  *
94  * The format module parameter can be set to a value of 0 (zero)
95  * or 1, indicating basic or extended format as described for
96  * enum cmb_format.
97  */
98 static int format = CMF_AUTODETECT;
99 module_param(format, bint, 0444);
100 
101 /**
102  * struct cmb_operations - functions to use depending on cmb_format
103  *
104  * Most of these functions operate on a struct ccw_device. There is only
105  * one instance of struct cmb_operations because the format of the measurement
106  * data is guaranteed to be the same for every ccw_device.
107  *
108  * @alloc:	allocate memory for a channel measurement block,
109  *		either with the help of a special pool or with kmalloc
110  * @free:	free memory allocated with @alloc
111  * @set:	enable or disable measurement
112  * @read:	read a measurement entry at an index
113  * @readall:	read a measurement block in a common format
114  * @reset:	clear the data in the associated measurement block and
115  *		reset its time stamp
116  * @align:	align an allocated block so that the hardware can use it
117  */
118 struct cmb_operations {
119 	int  (*alloc)  (struct ccw_device *);
120 	void (*free)   (struct ccw_device *);
121 	int  (*set)    (struct ccw_device *, u32);
122 	u64  (*read)   (struct ccw_device *, int);
123 	int  (*readall)(struct ccw_device *, struct cmbdata *);
124 	void (*reset)  (struct ccw_device *);
125 	void *(*align) (void *);
126 /* private: */
127 	struct attribute_group *attr_group;
128 };
129 static struct cmb_operations *cmbops;
130 
131 struct cmb_data {
132 	void *hw_block;   /* Pointer to block updated by hardware */
133 	void *last_block; /* Last changed block copied from hardware block */
134 	int size;	  /* Size of hw_block and last_block */
135 	unsigned long long last_update;  /* when last_block was updated */
136 };
137 
138 /*
139  * Our user interface is designed in terms of nanoseconds,
140  * while the hardware measures total times in units
141  * of 128 microseconds.
142  */
143 static inline u64 time_to_nsec(u32 value)
144 {
145 	return ((u64)value) * 128000ull;
146 }
147 
148 /*
149  * Users are usually interested in average times,
150  * not accumulated time.
151  * This also helps us with atomicity problems
152  * when reading single values.
153  */
154 static inline u64 time_to_avg_nsec(u32 value, u32 count)
155 {
156 	u64 ret;
157 
158 	/* no samples yet, avoid division by 0 */
159 	if (count == 0)
160 		return 0;
161 
162 	/* value comes in units of 128 µsec */
163 	ret = time_to_nsec(value);
164 	do_div(ret, count);
165 
166 	return ret;
167 }
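
/*
 * Worked example (illustrative, using the conversions above): a raw
 * counter value of 250 corresponds to 250 * 128 usec = 32 ms of
 * accumulated time (time_to_nsec() returns 32,000,000 ns).  With a
 * sample count of 10, time_to_avg_nsec() yields 3,200,000 ns, i.e.
 * an average of 3.2 ms per sample.
 */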
168 
169 /*
170  * Activate or deactivate the channel monitor. When area is NULL,
171  * the monitor is deactivated. The channel monitor needs to
172  * be active in order to measure subchannels, which also need
173  * to be enabled.
174  */
175 static inline void cmf_activate(void *area, unsigned int onoff)
176 {
177 	register void * __gpr2 asm("2");
178 	register long __gpr1 asm("1");
179 
180 	__gpr2 = area;
181 	__gpr1 = onoff ? 2 : 0;
182 	/* activate channel measurement */
183 	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
184 }
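
/*
 * For reference, the two ways this helper is used further down in this
 * file: cmf_activate(cmb_area.mem, 1) starts measurement with the basic
 * format block area, while cmf_activate(NULL, 0) switches measurement
 * off again.  The extended format passes NULL even when activating,
 * since each subchannel then carries its own block address.
 */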
185 
186 static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
187 		     unsigned long address)
188 {
189 	struct subchannel *sch;
190 
191 	sch = to_subchannel(cdev->dev.parent);
192 
193 	sch->config.mme = mme;
194 	sch->config.mbfc = mbfc;
195 	/* address can be either a block address or a block index */
196 	if (mbfc)
197 		sch->config.mba = address;
198 	else
199 		sch->config.mbi = address;
200 
201 	return cio_commit_config(sch);
202 }
203 
204 struct set_schib_struct {
205 	u32 mme;
206 	int mbfc;
207 	unsigned long address;
208 	wait_queue_head_t wait;
209 	int ret;
210 	struct kref kref;
211 };
212 
213 static void cmf_set_schib_release(struct kref *kref)
214 {
215 	struct set_schib_struct *set_data;
216 
217 	set_data = container_of(kref, struct set_schib_struct, kref);
218 	kfree(set_data);
219 }
220 
221 #define CMF_PENDING 1
222 
223 static int set_schib_wait(struct ccw_device *cdev, u32 mme,
224 				int mbfc, unsigned long address)
225 {
226 	struct set_schib_struct *set_data;
227 	int ret;
228 
229 	spin_lock_irq(cdev->ccwlock);
230 	if (!cdev->private->cmb) {
231 		ret = -ENODEV;
232 		goto out;
233 	}
234 	set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
235 	if (!set_data) {
236 		ret = -ENOMEM;
237 		goto out;
238 	}
239 	init_waitqueue_head(&set_data->wait);
240 	kref_init(&set_data->kref);
241 	set_data->mme = mme;
242 	set_data->mbfc = mbfc;
243 	set_data->address = address;
244 
245 	ret = set_schib(cdev, mme, mbfc, address);
246 	if (ret != -EBUSY)
247 		goto out_put;
248 
249 	if (cdev->private->state != DEV_STATE_ONLINE) {
250 		/* if the device is not online, don't even try again */
251 		ret = -EBUSY;
252 		goto out_put;
253 	}
254 
255 	cdev->private->state = DEV_STATE_CMFCHANGE;
256 	set_data->ret = CMF_PENDING;
257 	cdev->private->cmb_wait = set_data;
258 
259 	spin_unlock_irq(cdev->ccwlock);
260 	if (wait_event_interruptible(set_data->wait,
261 				     set_data->ret != CMF_PENDING)) {
262 		spin_lock_irq(cdev->ccwlock);
263 		if (set_data->ret == CMF_PENDING) {
264 			set_data->ret = -ERESTARTSYS;
265 			if (cdev->private->state == DEV_STATE_CMFCHANGE)
266 				cdev->private->state = DEV_STATE_ONLINE;
267 		}
268 		spin_unlock_irq(cdev->ccwlock);
269 	}
270 	spin_lock_irq(cdev->ccwlock);
271 	cdev->private->cmb_wait = NULL;
272 	ret = set_data->ret;
273 out_put:
274 	kref_put(&set_data->kref, cmf_set_schib_release);
275 out:
276 	spin_unlock_irq(cdev->ccwlock);
277 	return ret;
278 }
279 
280 void retry_set_schib(struct ccw_device *cdev)
281 {
282 	struct set_schib_struct *set_data;
283 
284 	set_data = cdev->private->cmb_wait;
285 	if (!set_data) {
286 		WARN_ON(1);
287 		return;
288 	}
289 	kref_get(&set_data->kref);
290 	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
291 				  set_data->address);
292 	wake_up(&set_data->wait);
293 	kref_put(&set_data->kref, cmf_set_schib_release);
294 }
295 
296 static int cmf_copy_block(struct ccw_device *cdev)
297 {
298 	struct subchannel *sch;
299 	void *reference_buf;
300 	void *hw_block;
301 	struct cmb_data *cmb_data;
302 
303 	sch = to_subchannel(cdev->dev.parent);
304 
305 	if (cio_update_schib(sch))
306 		return -ENODEV;
307 
308 	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
309 		/* Don't copy if a start function is in progress. */
310 		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
311 		    (scsw_actl(&sch->schib.scsw) &
312 		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
313 		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
314 			return -EBUSY;
315 	}
316 	cmb_data = cdev->private->cmb;
317 	hw_block = cmbops->align(cmb_data->hw_block);
318 	if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
319 		/* No need to copy. */
320 		return 0;
321 	reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
322 	if (!reference_buf)
323 		return -ENOMEM;
324 	/* Ensure consistency: copy twice and retry if the hardware updated the block mid-copy. */
325 	do {
326 		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
327 		memcpy(reference_buf, hw_block, cmb_data->size);
328 	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
329 	cmb_data->last_update = get_tod_clock();
330 	kfree(reference_buf);
331 	return 0;
332 }
333 
334 struct copy_block_struct {
335 	wait_queue_head_t wait;
336 	int ret;
337 	struct kref kref;
338 };
339 
340 static void cmf_copy_block_release(struct kref *kref)
341 {
342 	struct copy_block_struct *copy_block;
343 
344 	copy_block = container_of(kref, struct copy_block_struct, kref);
345 	kfree(copy_block);
346 }
347 
348 static int cmf_cmb_copy_wait(struct ccw_device *cdev)
349 {
350 	struct copy_block_struct *copy_block;
351 	int ret;
352 	unsigned long flags;
353 
354 	spin_lock_irqsave(cdev->ccwlock, flags);
355 	if (!cdev->private->cmb) {
356 		ret = -ENODEV;
357 		goto out;
358 	}
359 	copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
360 	if (!copy_block) {
361 		ret = -ENOMEM;
362 		goto out;
363 	}
364 	init_waitqueue_head(&copy_block->wait);
365 	kref_init(&copy_block->kref);
366 
367 	ret = cmf_copy_block(cdev);
368 	if (ret != -EBUSY)
369 		goto out_put;
370 
371 	if (cdev->private->state != DEV_STATE_ONLINE) {
372 		ret = -EBUSY;
373 		goto out_put;
374 	}
375 
376 	cdev->private->state = DEV_STATE_CMFUPDATE;
377 	copy_block->ret = CMF_PENDING;
378 	cdev->private->cmb_wait = copy_block;
379 
380 	spin_unlock_irqrestore(cdev->ccwlock, flags);
381 	if (wait_event_interruptible(copy_block->wait,
382 				     copy_block->ret != CMF_PENDING)) {
383 		spin_lock_irqsave(cdev->ccwlock, flags);
384 		if (copy_block->ret == CMF_PENDING) {
385 			copy_block->ret = -ERESTARTSYS;
386 			if (cdev->private->state == DEV_STATE_CMFUPDATE)
387 				cdev->private->state = DEV_STATE_ONLINE;
388 		}
389 		spin_unlock_irqrestore(cdev->ccwlock, flags);
390 	}
391 	spin_lock_irqsave(cdev->ccwlock, flags);
392 	cdev->private->cmb_wait = NULL;
393 	ret = copy_block->ret;
394 out_put:
395 	kref_put(&copy_block->kref, cmf_copy_block_release);
396 out:
397 	spin_unlock_irqrestore(cdev->ccwlock, flags);
398 	return ret;
399 }
400 
401 void cmf_retry_copy_block(struct ccw_device *cdev)
402 {
403 	struct copy_block_struct *copy_block;
404 
405 	copy_block = cdev->private->cmb_wait;
406 	if (!copy_block) {
407 		WARN_ON(1);
408 		return;
409 	}
410 	kref_get(&copy_block->kref);
411 	copy_block->ret = cmf_copy_block(cdev);
412 	wake_up(&copy_block->wait);
413 	kref_put(&copy_block->kref, cmf_copy_block_release);
414 }
415 
416 static void cmf_generic_reset(struct ccw_device *cdev)
417 {
418 	struct cmb_data *cmb_data;
419 
420 	spin_lock_irq(cdev->ccwlock);
421 	cmb_data = cdev->private->cmb;
422 	if (cmb_data) {
423 		memset(cmb_data->last_block, 0, cmb_data->size);
424 		/*
425 		 * Need to reset hw block as well to make the hardware start
426 		 * from 0 again.
427 		 */
428 		memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
429 		cmb_data->last_update = 0;
430 	}
431 	cdev->private->cmb_start_time = get_tod_clock();
432 	spin_unlock_irq(cdev->ccwlock);
433 }
434 
435 /**
436  * struct cmb_area - container for global cmb data
437  *
438  * @mem:	pointer to CMBs (only in basic measurement mode)
439  * @list:	contains a linked list of all subchannels
440  * @num_channels: number of channels to be measured
441  * @lock:	protect concurrent access to @mem and @list
442  */
443 struct cmb_area {
444 	struct cmb *mem;
445 	struct list_head list;
446 	int num_channels;
447 	spinlock_t lock;
448 };
449 
450 static struct cmb_area cmb_area = {
451 	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
452 	.list = LIST_HEAD_INIT(cmb_area.list),
453 	.num_channels  = 1024,
454 };
455 
456 /* ******** old style CMB handling ********/
457 
458 /*
459  * Basic channel measurement blocks are allocated in one contiguous
460  * block of memory, which cannot be moved as long as any channel
461  * is active. Therefore, a maximum number of subchannels needs to
462  * be defined somewhere. This is a module parameter, defaulting to
463  * a reasonable value of 1024, or 32 KB of memory.
464  * Current kernels don't allow kmalloc with more than 128 KB, so the
465  * maximum is 4096.
466  */
467 
468 module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
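
/*
 * Illustrative sizing, based on the struct cmb layout below: each basic
 * block is 32 bytes (2 * u16 + 7 * u32), so the default of 1024 channels
 * needs 1024 * 32 = 32 KB, and the stated maximum of 4096 channels needs
 * 4096 * 32 = 128 KB of contiguous memory.
 */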
469 
470 /**
471  * struct cmb - basic channel measurement block
472  * @ssch_rsch_count: number of ssch and rsch
473  * @sample_count: number of samples
474  * @device_connect_time: time of device connect
475  * @function_pending_time: time of function pending
476  * @device_disconnect_time: time of device disconnect
477  * @control_unit_queuing_time: time of control unit queuing
478  * @device_active_only_time: time of device active only
479  * @reserved: unused in basic measurement mode
480  *
481  * The measurement block as used by the hardware. The fields are described
482  * further in z/Architecture Principles of Operation, chapter 17.
483  *
484  * The cmb area made up from these blocks must be a contiguous array and may
485  * not be reallocated or freed.
486  * Only one cmb area can be present in the system.
487  */
488 struct cmb {
489 	u16 ssch_rsch_count;
490 	u16 sample_count;
491 	u32 device_connect_time;
492 	u32 function_pending_time;
493 	u32 device_disconnect_time;
494 	u32 control_unit_queuing_time;
495 	u32 device_active_only_time;
496 	u32 reserved[2];
497 };
498 
499 /*
500  * Insert a single device into the cmb_area list.
501  * Called with cmb_area.lock held from alloc_cmb.
502  */
503 static int alloc_cmb_single(struct ccw_device *cdev,
504 			    struct cmb_data *cmb_data)
505 {
506 	struct cmb *cmb;
507 	struct ccw_device_private *node;
508 	int ret;
509 
510 	spin_lock_irq(cdev->ccwlock);
511 	if (!list_empty(&cdev->private->cmb_list)) {
512 		ret = -EBUSY;
513 		goto out;
514 	}
515 
516 	/*
517 	 * Find first unused cmb in cmb_area.mem.
518 	 * This is a little tricky: cmb_area.list
519 	 * remains sorted by ->cmb->hw_data pointers.
520 	 */
521 	cmb = cmb_area.mem;
522 	list_for_each_entry(node, &cmb_area.list, cmb_list) {
523 		struct cmb_data *data;
524 		data = node->cmb;
525 		if ((struct cmb*)data->hw_block > cmb)
526 			break;
527 		cmb++;
528 	}
529 	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
530 		ret = -ENOMEM;
531 		goto out;
532 	}
533 
534 	/* insert new cmb */
535 	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
536 	cmb_data->hw_block = cmb;
537 	cdev->private->cmb = cmb_data;
538 	ret = 0;
539 out:
540 	spin_unlock_irq(cdev->ccwlock);
541 	return ret;
542 }
543 
544 static int alloc_cmb(struct ccw_device *cdev)
545 {
546 	int ret;
547 	struct cmb *mem;
548 	ssize_t size;
549 	struct cmb_data *cmb_data;
550 
551 	/* Allocate private cmb_data. */
552 	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
553 	if (!cmb_data)
554 		return -ENOMEM;
555 
556 	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
557 	if (!cmb_data->last_block) {
558 		kfree(cmb_data);
559 		return -ENOMEM;
560 	}
561 	cmb_data->size = sizeof(struct cmb);
562 	spin_lock(&cmb_area.lock);
563 
564 	if (!cmb_area.mem) {
565 		/* there is no user yet, so we need a new area */
566 		size = sizeof(struct cmb) * cmb_area.num_channels;
567 		WARN_ON(!list_empty(&cmb_area.list));
568 
569 		spin_unlock(&cmb_area.lock);
570 		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
571 				 get_order(size));
572 		spin_lock(&cmb_area.lock);
573 
574 		if (cmb_area.mem) {
575 			/* ok, another thread was faster */
576 			free_pages((unsigned long)mem, get_order(size));
577 		} else if (!mem) {
578 			/* no luck */
579 			ret = -ENOMEM;
580 			goto out;
581 		} else {
582 			/* everything ok */
583 			memset(mem, 0, size);
584 			cmb_area.mem = mem;
585 			cmf_activate(cmb_area.mem, 1);
586 		}
587 	}
588 
589 	/* do the actual allocation */
590 	ret = alloc_cmb_single(cdev, cmb_data);
591 out:
592 	spin_unlock(&cmb_area.lock);
593 	if (ret) {
594 		kfree(cmb_data->last_block);
595 		kfree(cmb_data);
596 	}
597 	return ret;
598 }
599 
600 static void free_cmb(struct ccw_device *cdev)
601 {
602 	struct ccw_device_private *priv;
603 	struct cmb_data *cmb_data;
604 
605 	spin_lock(&cmb_area.lock);
606 	spin_lock_irq(cdev->ccwlock);
607 
608 	priv = cdev->private;
609 
610 	if (list_empty(&priv->cmb_list)) {
611 		/* already freed */
612 		goto out;
613 	}
614 
615 	cmb_data = priv->cmb;
616 	priv->cmb = NULL;
617 	if (cmb_data)
618 		kfree(cmb_data->last_block);
619 	kfree(cmb_data);
620 	list_del_init(&priv->cmb_list);
621 
622 	if (list_empty(&cmb_area.list)) {
623 		ssize_t size;
624 		size = sizeof(struct cmb) * cmb_area.num_channels;
625 		cmf_activate(NULL, 0);
626 		free_pages((unsigned long)cmb_area.mem, get_order(size));
627 		cmb_area.mem = NULL;
628 	}
629 out:
630 	spin_unlock_irq(cdev->ccwlock);
631 	spin_unlock(&cmb_area.lock);
632 }
633 
634 static int set_cmb(struct ccw_device *cdev, u32 mme)
635 {
636 	u16 offset;
637 	struct cmb_data *cmb_data;
638 	unsigned long flags;
639 
640 	spin_lock_irqsave(cdev->ccwlock, flags);
641 	if (!cdev->private->cmb) {
642 		spin_unlock_irqrestore(cdev->ccwlock, flags);
643 		return -EINVAL;
644 	}
645 	cmb_data = cdev->private->cmb;
646 	offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
647 	spin_unlock_irqrestore(cdev->ccwlock, flags);
648 
649 	return set_schib_wait(cdev, mme, 0, offset);
650 }
651 
652 static u64 read_cmb(struct ccw_device *cdev, int index)
653 {
654 	struct cmb *cmb;
655 	u32 val;
656 	int ret;
657 	unsigned long flags;
658 
659 	ret = cmf_cmb_copy_wait(cdev);
660 	if (ret < 0)
661 		return 0;
662 
663 	spin_lock_irqsave(cdev->ccwlock, flags);
664 	if (!cdev->private->cmb) {
665 		ret = 0;
666 		goto out;
667 	}
668 	cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
669 
670 	switch (index) {
671 	case cmb_ssch_rsch_count:
672 		ret = cmb->ssch_rsch_count;
673 		goto out;
674 	case cmb_sample_count:
675 		ret = cmb->sample_count;
676 		goto out;
677 	case cmb_device_connect_time:
678 		val = cmb->device_connect_time;
679 		break;
680 	case cmb_function_pending_time:
681 		val = cmb->function_pending_time;
682 		break;
683 	case cmb_device_disconnect_time:
684 		val = cmb->device_disconnect_time;
685 		break;
686 	case cmb_control_unit_queuing_time:
687 		val = cmb->control_unit_queuing_time;
688 		break;
689 	case cmb_device_active_only_time:
690 		val = cmb->device_active_only_time;
691 		break;
692 	default:
693 		ret = 0;
694 		goto out;
695 	}
696 	ret = time_to_avg_nsec(val, cmb->sample_count);
697 out:
698 	spin_unlock_irqrestore(cdev->ccwlock, flags);
699 	return ret;
700 }
701 
702 static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
703 {
704 	struct cmb *cmb;
705 	struct cmb_data *cmb_data;
706 	u64 time;
707 	unsigned long flags;
708 	int ret;
709 
710 	ret = cmf_cmb_copy_wait(cdev);
711 	if (ret < 0)
712 		return ret;
713 	spin_lock_irqsave(cdev->ccwlock, flags);
714 	cmb_data = cdev->private->cmb;
715 	if (!cmb_data) {
716 		ret = -ENODEV;
717 		goto out;
718 	}
719 	if (cmb_data->last_update == 0) {
720 		ret = -EAGAIN;
721 		goto out;
722 	}
723 	cmb = cmb_data->last_block;
724 	time = cmb_data->last_update - cdev->private->cmb_start_time;
725 
726 	memset(data, 0, sizeof(struct cmbdata));
727 
728 	/* we only know values before device_busy_time */
729 	data->size = offsetof(struct cmbdata, device_busy_time);
730 
731 	/* convert TOD clock units (2^-12 microseconds each) to nanoseconds */
732 	data->elapsed_time = (time * 1000) >> 12;
733 
734 	/* copy data to new structure */
735 	data->ssch_rsch_count = cmb->ssch_rsch_count;
736 	data->sample_count = cmb->sample_count;
737 
738 	/* time fields are converted to nanoseconds while copying */
739 	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
740 	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
741 	data->device_disconnect_time =
742 		time_to_nsec(cmb->device_disconnect_time);
743 	data->control_unit_queuing_time
744 		= time_to_nsec(cmb->control_unit_queuing_time);
745 	data->device_active_only_time
746 		= time_to_nsec(cmb->device_active_only_time);
747 	ret = 0;
748 out:
749 	spin_unlock_irqrestore(cdev->ccwlock, flags);
750 	return ret;
751 }
752 
753 static void reset_cmb(struct ccw_device *cdev)
754 {
755 	cmf_generic_reset(cdev);
756 }
757 
758 static void *align_cmb(void *area)
759 {
760 	return area;
761 }
762 
763 static struct attribute_group cmf_attr_group;
764 
765 static struct cmb_operations cmbops_basic = {
766 	.alloc	= alloc_cmb,
767 	.free	= free_cmb,
768 	.set	= set_cmb,
769 	.read	= read_cmb,
770 	.readall    = readall_cmb,
771 	.reset	    = reset_cmb,
772 	.align	    = align_cmb,
773 	.attr_group = &cmf_attr_group,
774 };
775 
776 /* ******** extended cmb handling ********/
777 
778 /**
779  * struct cmbe - extended channel measurement block
780  * @ssch_rsch_count: number of ssch and rsch
781  * @sample_count: number of samples
782  * @device_connect_time: time of device connect
783  * @function_pending_time: time of function pending
784  * @device_disconnect_time: time of device disconnect
785  * @control_unit_queuing_time: time of control unit queuing
786  * @device_active_only_time: time of device active only
787  * @device_busy_time: time of device busy
788  * @initial_command_response_time: initial command response time
789  * @reserved: unused
790  *
791  * The measurement block as used by the hardware. May be in any 64 bit physical
792  * location.
793  * The fields are described further in z/Architecture Principles of Operation,
794  * third edition, chapter 17.
795  */
796 struct cmbe {
797 	u32 ssch_rsch_count;
798 	u32 sample_count;
799 	u32 device_connect_time;
800 	u32 function_pending_time;
801 	u32 device_disconnect_time;
802 	u32 control_unit_queuing_time;
803 	u32 device_active_only_time;
804 	u32 device_busy_time;
805 	u32 initial_command_response_time;
806 	u32 reserved[7];
807 };
808 
809 /*
810  * kmalloc only guarantees 8 byte alignment, but we need cmbe
811  * pointers to be naturally aligned. Make sure to allocate
812  * enough space for two cmbes.
813  */
814 static inline struct cmbe *cmbe_align(struct cmbe *c)
815 {
816 	unsigned long addr;
817 	addr = ((unsigned long)c + sizeof(struct cmbe) - sizeof(long)) &
818 				 ~(sizeof(struct cmbe) - sizeof(long));
819 	return (struct cmbe*)addr;
820 }
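
/*
 * Worked example (illustrative, assuming 64-bit where sizeof(struct cmbe)
 * is 64 and sizeof(long) is 8): kzalloc() returns at least 8-byte aligned
 * memory, so for c = 0x10008 the expression above gives
 * (0x10008 + 56) & ~56UL = 0x10040, the next 64-byte boundary, which still
 * lies within the doubled allocation made in alloc_cmbe() below.
 */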
821 
822 static int alloc_cmbe(struct ccw_device *cdev)
823 {
824 	struct cmbe *cmbe;
825 	struct cmb_data *cmb_data;
826 	int ret;
827 
828 	cmbe = kzalloc(sizeof(*cmbe) * 2, GFP_KERNEL);
829 	if (!cmbe)
830 		return -ENOMEM;
831 	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
832 	if (!cmb_data) {
833 		ret = -ENOMEM;
834 		goto out_free;
835 	}
836 	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
837 	if (!cmb_data->last_block) {
838 		ret = -ENOMEM;
839 		goto out_free;
840 	}
841 	cmb_data->size = sizeof(struct cmbe);
842 	spin_lock_irq(cdev->ccwlock);
843 	if (cdev->private->cmb) {
844 		spin_unlock_irq(cdev->ccwlock);
845 		ret = -EBUSY;
846 		goto out_free;
847 	}
848 	cmb_data->hw_block = cmbe;
849 	cdev->private->cmb = cmb_data;
850 	spin_unlock_irq(cdev->ccwlock);
851 
852 	/* activate global measurement if this is the first channel */
853 	spin_lock(&cmb_area.lock);
854 	if (list_empty(&cmb_area.list))
855 		cmf_activate(NULL, 1);
856 	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
857 	spin_unlock(&cmb_area.lock);
858 
859 	return 0;
860 out_free:
861 	if (cmb_data)
862 		kfree(cmb_data->last_block);
863 	kfree(cmb_data);
864 	kfree(cmbe);
865 	return ret;
866 }
867 
868 static void free_cmbe(struct ccw_device *cdev)
869 {
870 	struct cmb_data *cmb_data;
871 
872 	spin_lock_irq(cdev->ccwlock);
873 	cmb_data = cdev->private->cmb;
874 	cdev->private->cmb = NULL;
875 	if (cmb_data)
876 		kfree(cmb_data->last_block);
877 	kfree(cmb_data);
878 	spin_unlock_irq(cdev->ccwlock);
879 
880 	/* deactivate global measurement if this is the last channel */
881 	spin_lock(&cmb_area.lock);
882 	list_del_init(&cdev->private->cmb_list);
883 	if (list_empty(&cmb_area.list))
884 		cmf_activate(NULL, 0);
885 	spin_unlock(&cmb_area.lock);
886 }
887 
888 static int set_cmbe(struct ccw_device *cdev, u32 mme)
889 {
890 	unsigned long mba;
891 	struct cmb_data *cmb_data;
892 	unsigned long flags;
893 
894 	spin_lock_irqsave(cdev->ccwlock, flags);
895 	if (!cdev->private->cmb) {
896 		spin_unlock_irqrestore(cdev->ccwlock, flags);
897 		return -EINVAL;
898 	}
899 	cmb_data = cdev->private->cmb;
900 	mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
901 	spin_unlock_irqrestore(cdev->ccwlock, flags);
902 
903 	return set_schib_wait(cdev, mme, 1, mba);
904 }
905 
906 
907 static u64 read_cmbe(struct ccw_device *cdev, int index)
908 {
909 	struct cmbe *cmb;
910 	struct cmb_data *cmb_data;
911 	u32 val;
912 	int ret;
913 	unsigned long flags;
914 
915 	ret = cmf_cmb_copy_wait(cdev);
916 	if (ret < 0)
917 		return 0;
918 
919 	spin_lock_irqsave(cdev->ccwlock, flags);
920 	cmb_data = cdev->private->cmb;
921 	if (!cmb_data) {
922 		ret = 0;
923 		goto out;
924 	}
925 	cmb = cmb_data->last_block;
926 
927 	switch (index) {
928 	case cmb_ssch_rsch_count:
929 		ret = cmb->ssch_rsch_count;
930 		goto out;
931 	case cmb_sample_count:
932 		ret = cmb->sample_count;
933 		goto out;
934 	case cmb_device_connect_time:
935 		val = cmb->device_connect_time;
936 		break;
937 	case cmb_function_pending_time:
938 		val = cmb->function_pending_time;
939 		break;
940 	case cmb_device_disconnect_time:
941 		val = cmb->device_disconnect_time;
942 		break;
943 	case cmb_control_unit_queuing_time:
944 		val = cmb->control_unit_queuing_time;
945 		break;
946 	case cmb_device_active_only_time:
947 		val = cmb->device_active_only_time;
948 		break;
949 	case cmb_device_busy_time:
950 		val = cmb->device_busy_time;
951 		break;
952 	case cmb_initial_command_response_time:
953 		val = cmb->initial_command_response_time;
954 		break;
955 	default:
956 		ret = 0;
957 		goto out;
958 	}
959 	ret = time_to_avg_nsec(val, cmb->sample_count);
960 out:
961 	spin_unlock_irqrestore(cdev->ccwlock, flags);
962 	return ret;
963 }
964 
965 static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
966 {
967 	struct cmbe *cmb;
968 	struct cmb_data *cmb_data;
969 	u64 time;
970 	unsigned long flags;
971 	int ret;
972 
973 	ret = cmf_cmb_copy_wait(cdev);
974 	if (ret < 0)
975 		return ret;
976 	spin_lock_irqsave(cdev->ccwlock, flags);
977 	cmb_data = cdev->private->cmb;
978 	if (!cmb_data) {
979 		ret = -ENODEV;
980 		goto out;
981 	}
982 	if (cmb_data->last_update == 0) {
983 		ret = -EAGAIN;
984 		goto out;
985 	}
986 	time = cmb_data->last_update - cdev->private->cmb_start_time;
987 
988 	memset(data, 0, sizeof(struct cmbdata));
989 
990 	/* we only know values before device_busy_time */
991 	data->size = offsetof(struct cmbdata, device_busy_time);
992 
993 	/* convert TOD clock units (2^-12 microseconds each) to nanoseconds */
994 	data->elapsed_time = (time * 1000) >> 12;
995 
996 	cmb = cmb_data->last_block;
997 	/* copy data to new structure */
998 	data->ssch_rsch_count = cmb->ssch_rsch_count;
999 	data->sample_count = cmb->sample_count;
1000 
1001 	/* time fields are converted to nanoseconds while copying */
1002 	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
1003 	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
1004 	data->device_disconnect_time =
1005 		time_to_nsec(cmb->device_disconnect_time);
1006 	data->control_unit_queuing_time
1007 		= time_to_nsec(cmb->control_unit_queuing_time);
1008 	data->device_active_only_time
1009 		= time_to_nsec(cmb->device_active_only_time);
1010 	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
1011 	data->initial_command_response_time
1012 		= time_to_nsec(cmb->initial_command_response_time);
1013 
1014 	ret = 0;
1015 out:
1016 	spin_unlock_irqrestore(cdev->ccwlock, flags);
1017 	return ret;
1018 }
1019 
1020 static void reset_cmbe(struct ccw_device *cdev)
1021 {
1022 	cmf_generic_reset(cdev);
1023 }
1024 
1025 static void *align_cmbe(void *area)
1026 {
1027 	return cmbe_align(area);
1028 }
1029 
1030 static struct attribute_group cmf_attr_group_ext;
1031 
1032 static struct cmb_operations cmbops_extended = {
1033 	.alloc	    = alloc_cmbe,
1034 	.free	    = free_cmbe,
1035 	.set	    = set_cmbe,
1036 	.read	    = read_cmbe,
1037 	.readall    = readall_cmbe,
1038 	.reset	    = reset_cmbe,
1039 	.align	    = align_cmbe,
1040 	.attr_group = &cmf_attr_group_ext,
1041 };
1042 
1043 static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
1044 {
1045 	return sprintf(buf, "%lld\n",
1046 		(unsigned long long) cmf_read(to_ccwdev(dev), idx));
1047 }
1048 
1049 static ssize_t cmb_show_avg_sample_interval(struct device *dev,
1050 					    struct device_attribute *attr,
1051 					    char *buf)
1052 {
1053 	struct ccw_device *cdev;
1054 	long interval;
1055 	unsigned long count;
1056 	struct cmb_data *cmb_data;
1057 
1058 	cdev = to_ccwdev(dev);
1059 	count = cmf_read(cdev, cmb_sample_count);
1060 	spin_lock_irq(cdev->ccwlock);
1061 	cmb_data = cdev->private->cmb;
1062 	if (count) {
1063 		interval = cmb_data->last_update -
1064 			cdev->private->cmb_start_time;
1065 		interval = (interval * 1000) >> 12;
1066 		interval /= count;
1067 	} else
1068 		interval = -1;
1069 	spin_unlock_irq(cdev->ccwlock);
1070 	return sprintf(buf, "%ld\n", interval);
1071 }
1072 
1073 static ssize_t cmb_show_avg_utilization(struct device *dev,
1074 					struct device_attribute *attr,
1075 					char *buf)
1076 {
1077 	struct cmbdata data;
1078 	u64 utilization;
1079 	unsigned long t, u;
1080 	int ret;
1081 
1082 	ret = cmf_readall(to_ccwdev(dev), &data);
1083 	if (ret == -EAGAIN || ret == -ENODEV)
1084 		/* No data (yet/currently) available to use for calculation. */
1085 		return sprintf(buf, "n/a\n");
1086 	else if (ret)
1087 		return ret;
1088 
1089 	utilization = data.device_connect_time +
1090 		      data.function_pending_time +
1091 		      data.device_disconnect_time;
1092 
1093 	/* shift to avoid long long division */
1094 	while (-1ul < (data.elapsed_time | utilization)) {
1095 		utilization >>= 8;
1096 		data.elapsed_time >>= 8;
1097 	}
1098 
1099 	/* calculate value in 0.1 percent units */
1100 	t = (unsigned long) data.elapsed_time / 1000;
1101 	u = (unsigned long) utilization / t;
1102 
1103 	return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
1104 }
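
/*
 * Worked example (illustrative): with elapsed_time = 2,000,000,000 ns and
 * utilization = 500,000,000 ns the code above computes t = 2,000,000 and
 * u = 250, i.e. 250 tenths of a percent, printed as "25.0%".  The shift
 * loop only kicks in when either value exceeds what fits in an
 * unsigned long.
 */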
1105 
1106 #define cmf_attr(name) \
1107 static ssize_t show_##name(struct device *dev, \
1108 			   struct device_attribute *attr, char *buf)	\
1109 { return cmb_show_attr((dev), buf, cmb_##name); } \
1110 static DEVICE_ATTR(name, 0444, show_##name, NULL);
1111 
1112 #define cmf_attr_avg(name) \
1113 static ssize_t show_avg_##name(struct device *dev, \
1114 			       struct device_attribute *attr, char *buf) \
1115 { return cmb_show_attr((dev), buf, cmb_##name); } \
1116 static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
1117 
1118 cmf_attr(ssch_rsch_count);
1119 cmf_attr(sample_count);
1120 cmf_attr_avg(device_connect_time);
1121 cmf_attr_avg(function_pending_time);
1122 cmf_attr_avg(device_disconnect_time);
1123 cmf_attr_avg(control_unit_queuing_time);
1124 cmf_attr_avg(device_active_only_time);
1125 cmf_attr_avg(device_busy_time);
1126 cmf_attr_avg(initial_command_response_time);
1127 
1128 static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
1129 		   NULL);
1130 static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
1131 
1132 static struct attribute *cmf_attributes[] = {
1133 	&dev_attr_avg_sample_interval.attr,
1134 	&dev_attr_avg_utilization.attr,
1135 	&dev_attr_ssch_rsch_count.attr,
1136 	&dev_attr_sample_count.attr,
1137 	&dev_attr_avg_device_connect_time.attr,
1138 	&dev_attr_avg_function_pending_time.attr,
1139 	&dev_attr_avg_device_disconnect_time.attr,
1140 	&dev_attr_avg_control_unit_queuing_time.attr,
1141 	&dev_attr_avg_device_active_only_time.attr,
1142 	NULL,
1143 };
1144 
1145 static struct attribute_group cmf_attr_group = {
1146 	.name  = "cmf",
1147 	.attrs = cmf_attributes,
1148 };
1149 
1150 static struct attribute *cmf_attributes_ext[] = {
1151 	&dev_attr_avg_sample_interval.attr,
1152 	&dev_attr_avg_utilization.attr,
1153 	&dev_attr_ssch_rsch_count.attr,
1154 	&dev_attr_sample_count.attr,
1155 	&dev_attr_avg_device_connect_time.attr,
1156 	&dev_attr_avg_function_pending_time.attr,
1157 	&dev_attr_avg_device_disconnect_time.attr,
1158 	&dev_attr_avg_control_unit_queuing_time.attr,
1159 	&dev_attr_avg_device_active_only_time.attr,
1160 	&dev_attr_avg_device_busy_time.attr,
1161 	&dev_attr_avg_initial_command_response_time.attr,
1162 	NULL,
1163 };
1164 
1165 static struct attribute_group cmf_attr_group_ext = {
1166 	.name  = "cmf",
1167 	.attrs = cmf_attributes_ext,
1168 };
1169 
1170 static ssize_t cmb_enable_show(struct device *dev,
1171 			       struct device_attribute *attr,
1172 			       char *buf)
1173 {
1174 	return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
1175 }
1176 
1177 static ssize_t cmb_enable_store(struct device *dev,
1178 				struct device_attribute *attr, const char *buf,
1179 				size_t c)
1180 {
1181 	struct ccw_device *cdev;
1182 	int ret;
1183 	unsigned long val;
1184 
1185 	ret = kstrtoul(buf, 16, &val);
1186 	if (ret)
1187 		return ret;
1188 
1189 	cdev = to_ccwdev(dev);
1190 
1191 	switch (val) {
1192 	case 0:
1193 		ret = disable_cmf(cdev);
1194 		break;
1195 	case 1:
1196 		ret = enable_cmf(cdev);
1197 		break;
1198 	}
1199 
1200 	return c;
1201 }
1202 
1203 DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
1204 
1205 int ccw_set_cmf(struct ccw_device *cdev, int enable)
1206 {
1207 	return cmbops->set(cdev, enable ? 2 : 0);
1208 }
1209 
1210 /**
1211  * enable_cmf() - switch on the channel measurement for a specific device
1212  *  @cdev:	The ccw device to be enabled
1213  *
1214  *  Returns %0 for success or a negative error value.
1215  *
1216  *  Context:
1217  *    non-atomic
1218  */
1219 int enable_cmf(struct ccw_device *cdev)
1220 {
1221 	int ret;
1222 
1223 	ret = cmbops->alloc(cdev);
1224 	cmbops->reset(cdev);
1225 	if (ret)
1226 		return ret;
1227 	ret = cmbops->set(cdev, 2);
1228 	if (ret) {
1229 		cmbops->free(cdev);
1230 		return ret;
1231 	}
1232 	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
1233 	if (!ret)
1234 		return 0;
1235 	cmbops->set(cdev, 0);  /* FIXME: this can fail */
1236 	cmbops->free(cdev);
1237 	return ret;
1238 }
1239 
1240 /**
1241  * disable_cmf() - switch off the channel measurement for a specific device
1242  *  @cdev:	The ccw device to be disabled
1243  *
1244  *  Returns %0 for success or a negative error value.
1245  *
1246  *  Context:
1247  *    non-atomic
1248  */
1249 int disable_cmf(struct ccw_device *cdev)
1250 {
1251 	int ret;
1252 
1253 	ret = cmbops->set(cdev, 0);
1254 	if (ret)
1255 		return ret;
1256 	cmbops->free(cdev);
1257 	sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
1258 	return ret;
1259 }
1260 
1261 /**
1262  * cmf_read() - read one value from the current channel measurement block
1263  * @cdev:	the channel to be read
1264  * @index:	the index of the value to be read
1265  *
1266  * Returns the value read or %0 if the value cannot be read.
1267  *
1268  *  Context:
1269  *    any
1270  */
1271 u64 cmf_read(struct ccw_device *cdev, int index)
1272 {
1273 	return cmbops->read(cdev, index);
1274 }
1275 
1276 /**
1277  * cmf_readall() - read the current channel measurement block
1278  * @cdev:	the channel to be read
1279  * @data:	a pointer to a data block that will be filled
1280  *
1281  * Returns %0 on success, a negative error value otherwise.
1282  *
1283  *  Context:
1284  *    any
1285  */
1286 int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
1287 {
1288 	return cmbops->readall(cdev, data);
1289 }
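
/*
 * Minimal usage sketch for the exported interface (illustrative only, not
 * part of this driver): a device driver holding a ccw_device could enable
 * measurement, read the accumulated data and disable it again roughly
 * like this; error handling is shortened and the printed fields are the
 * ones filled in by readall_cmb()/readall_cmbe() above.
 *
 *	struct cmbdata data;
 *	int ret;
 *
 *	ret = enable_cmf(cdev);
 *	if (ret)
 *		return ret;
 *	ret = cmf_readall(cdev, &data);
 *	if (!ret)
 *		dev_info(&cdev->dev, "%llu ssch/rsch, %llu ns connected\n",
 *			 (unsigned long long) data.ssch_rsch_count,
 *			 (unsigned long long) data.device_connect_time);
 *	disable_cmf(cdev);
 */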
1290 
1291 /* Reenable cmf when a disconnected device becomes available again. */
1292 int cmf_reenable(struct ccw_device *cdev)
1293 {
1294 	cmbops->reset(cdev);
1295 	return cmbops->set(cdev, 2);
1296 }
1297 
1298 static int __init init_cmf(void)
1299 {
1300 	char *format_string;
1301 	char *detect_string = "parameter";
1302 
1303 	/*
1304 	 * If the user did not give a parameter, see if we are running on a
1305 	 * machine supporting extended measurement blocks, otherwise fall back
1306 	 * to basic mode.
1307 	 */
1308 	if (format == CMF_AUTODETECT) {
1309 		if (!css_general_characteristics.ext_mb) {
1310 			format = CMF_BASIC;
1311 		} else {
1312 			format = CMF_EXTENDED;
1313 		}
1314 		detect_string = "autodetected";
1315 	} else {
1316 		detect_string = "parameter";
1317 	}
1318 
1319 	switch (format) {
1320 	case CMF_BASIC:
1321 		format_string = "basic";
1322 		cmbops = &cmbops_basic;
1323 		break;
1324 	case CMF_EXTENDED:
1325 		format_string = "extended";
1326 		cmbops = &cmbops_extended;
1327 		break;
1328 	default:
1329 		return 1;
1330 	}
1331 	pr_info("Channel measurement facility initialized using format "
1332 		"%s (mode %s)\n", format_string, detect_string);
1333 	return 0;
1334 }
1335 
1336 module_init(init_cmf);
1337 
1338 
1339 MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
1340 MODULE_LICENSE("GPL");
1341 MODULE_DESCRIPTION("channel measurement facility base driver\n"
1342 		   "Copyright IBM Corp. 2003\n");
1343 
1344 EXPORT_SYMBOL_GPL(enable_cmf);
1345 EXPORT_SYMBOL_GPL(disable_cmf);
1346 EXPORT_SYMBOL_GPL(cmf_read);
1347 EXPORT_SYMBOL_GPL(cmf_readall);
1348