/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>	/* O_RDWR, used by cpc_shared_open() */
#include <errno.h>
#include <string.h>
#include <strings.h>
#include <stdarg.h>
#include <signal.h>
#include <libintl.h>
#include <dirent.h>
#include <sys/cpc_impl.h>

#include "libcpc.h"
#include "libcpc_impl.h"

/*
 * CPC library handle for use by CPCv1 implementation.
 */
cpc_t *__cpc = NULL;
mutex_t __cpc_lock;		/* protects __cpc handle */
int __cpc_v1_cpuver;		/* CPU version in use by CPCv1 client */

#ifdef __sparc
uint64_t __cpc_v1_pcr;		/* last bound %pcr value */
#else
uint32_t __cpc_v1_pes[2];	/* last bound %pes values */
#endif /* __sparc */

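/*
 * Lazily open the shared CPCv2 handle used by the obsolete CPCv1
 * interfaces in this file.  Returns 0 on success, -1 on failure.
 */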
int
__cpc_init(void)
{
	const char *fn = "__cpc_init";
	extern cpc_t *__cpc;	/* CPC handle for obsolete clients to share */

	(void) mutex_lock(&__cpc_lock);
	if (__cpc == NULL && (__cpc = cpc_open(CPC_VER_CURRENT)) == NULL) {
		__cpc_error(fn, dgettext(TEXT_DOMAIN,
		    "Couldn't open CPC library handle\n"));
		(void) mutex_unlock(&__cpc_lock);
		return (-1);
	}
	(void) mutex_unlock(&__cpc_lock);

	return (0);
}

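/*
 * Bind a CPCv1 event to the calling LWP by converting it into a CPCv2
 * set.  A NULL event releases the counters instead, preserving the
 * original CPCv1 semantics.
 */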
int
cpc_bind_event(cpc_event_t *this, int flags)
{
	cpc_set_t		*set;
	cpc_request_t		*rp;
	int			ret;

	if (this == NULL) {
		(void) cpc_rele();
		return (0);
	}

	if (__cpc_init() != 0) {
		errno = ENXIO;
		return (-1);
	}

	/*
	 * The cpuver and control fields of the cpc_event_t must be saved off
	 * for later. The user may call cpc_take_sample(), expecting these to
	 * be copied into a different cpc_event_t struct by the kernel. We have
	 * to fake that behavior for CPCv1 clients.
	 */
	__cpc_v1_cpuver = this->ce_cpuver;
#ifdef __sparc
	__cpc_v1_pcr = this->ce_pcr;
#else
	__cpc_v1_pes[0] = this->ce_pes[0];
	__cpc_v1_pes[1] = this->ce_pes[1];
#endif /* __sparc */

	if ((set = __cpc_eventtoset(__cpc, this, flags)) == NULL) {
		errno = EINVAL;
		return (-1);
	}

	/*
	 * Convert flags to CPC2.
	 */
	if (flags & CPC_BIND_EMT_OVF) {
		for (rp = set->cs_request; rp != NULL; rp = rp->cr_next)
			rp->cr_flags |= CPC_OVF_NOTIFY_EMT;
		flags &= ~CPC_BIND_EMT_OVF;
	}

	ret = cpc_bind_curlwp(__cpc, set, flags);

	(void) cpc_set_destroy(__cpc, set);

	return (ret);
}

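/*
 * Sample the counters bound by cpc_bind_event().  The cpuver and
 * control register values saved at bind time are copied back into the
 * caller's cpc_event_t to mimic the CPCv1 kernel behavior.
 */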
int
cpc_take_sample(cpc_event_t *this)
{
	this->ce_cpuver = __cpc_v1_cpuver;
#ifdef __sparc
	this->ce_pcr = __cpc_v1_pcr;
#else
	this->ce_pes[0] = __cpc_v1_pes[0];
	this->ce_pes[1] = __cpc_v1_pes[1];
#endif /* __sparc */

	return (syscall(SYS_cpc, CPC_SAMPLE, -1, this->ce_pic, &this->ce_hrt,
	    &CPC_TICKREG(this), 0));
}

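/*
 * The following are thin wrappers around the raw CPC system call:
 * they enable or disable counting of user-mode and system-mode
 * events, and release the performance counter context.
 */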
int
cpc_count_usr_events(int enable)
{
	return (syscall(SYS_cpc, CPC_USR_EVENTS, -1, enable, 0));
}

int
cpc_count_sys_events(int enable)
{
	return (syscall(SYS_cpc, CPC_SYS_EVENTS, -1, enable, 0));
}

int
cpc_rele(void)
{
	return (syscall(SYS_cpc, CPC_RELE, -1, NULL, 0));
}

/*
 * See if the system call is working and installed.
 *
 * We invoke the system call with nonsense arguments - if it's
 * there and working correctly, it will return EINVAL.
 *
 * (This avoids the user getting a SIGSYS core dump when they attempt
 * to bind on older hardware)
 */
int
cpc_access(void)
{
	void (*handler)(int);
	int error = 0;
	const char fn[] = "access";

	handler = signal(SIGSYS, SIG_IGN);
	if (syscall(SYS_cpc, -1, -1, NULL, 0) == -1 &&
	    errno != EINVAL)
		error = errno;
	(void) signal(SIGSYS, handler);

	switch (error) {
	case EAGAIN:
		__cpc_error(fn, dgettext(TEXT_DOMAIN, "Another process may be "
		    "sampling system-wide CPU statistics\n"));
		break;
	case ENOSYS:
		__cpc_error(fn,
		    dgettext(TEXT_DOMAIN, "CPU performance counters "
		    "are inaccessible on this machine\n"));
		break;
	default:
		__cpc_error(fn, "%s\n", strerror(errno));
		break;
	case 0:
		return (0);
	}

	errno = error;
	return (-1);
}

/*
 * To look at the system-wide counters, we have to open the
 * 'shared' device.  Once that device is open, no further contexts
 * can be installed (though one open is needed per CPU)
 */
int
cpc_shared_open(void)
{
	const char driver[] = CPUDRV_SHARED;

	return (open(driver, O_RDWR));
}

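/*
 * Release the system-wide counter context and close the shared device.
 */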
void
cpc_shared_close(int fd)
{
	(void) cpc_shared_rele(fd);
	(void) close(fd);
}

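/*
 * Bind a CPCv1 event to the system-wide counters via the shared
 * device: the event is converted to a CPCv2 set, packed, and handed
 * to the driver with the CPCIO_BIND ioctl.  A NULL event releases the
 * context instead.
 */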
int
cpc_shared_bind_event(int fd, cpc_event_t *this, int flags)
{
	extern cpc_t		*__cpc;
	cpc_set_t		*set;
	int			ret;
	char			*packed_set;
	size_t			packsize;
	int			subcode;
	__cpc_args_t		cpc_args;

	if (this == NULL) {
		(void) cpc_shared_rele(fd);
		return (0);
	} else if (flags != 0) {
		errno = EINVAL;
		return (-1);
	}

	if (__cpc_init() != 0) {
		errno = ENXIO;
		return (-1);
	}

	if ((set = __cpc_eventtoset(__cpc, this, flags)) == NULL) {
		errno = EINVAL;
		return (-1);
	}

	__cpc_v1_cpuver = this->ce_cpuver;

	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	cpc_args.udata1 = packed_set;
	cpc_args.udata2 = (void *)packsize;
	cpc_args.udata3 = (void *)&subcode;

	ret = ioctl(fd, CPCIO_BIND, &cpc_args);

	free(packed_set);
	(void) cpc_set_destroy(__cpc, set);

	return (ret);
}

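/*
 * Sample the system-wide counters through the shared device.
 */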
int
cpc_shared_take_sample(int fd, cpc_event_t *this)
{
	__cpc_args_t args;

	args.udata1 = this->ce_pic;
	args.udata2 = &this->ce_hrt;
	args.udata3 = &CPC_TICKREG(this);

	this->ce_cpuver = __cpc_v1_cpuver;

	return (ioctl(fd, CPCIO_SAMPLE, &args));
}

int
cpc_shared_rele(int fd)
{
	return (ioctl(fd, CPCIO_RELE, 0));
}

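/*
 * Bind a CPCv1 event to the given lwpid in the process controlled by
 * the pctx handle.  A NULL event releases that LWP's counters.
 */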
int
cpc_pctx_bind_event(pctx_t *pctx, id_t lwpid, cpc_event_t *event, int flags)
{
	cpc_set_t		*set;
	int			ret;

	if (event == NULL)
		return (cpc_pctx_rele(pctx, lwpid));

	if (__cpc_init() != 0) {
		errno = ENXIO;
		return (-1);
	} else if (flags != 0) {
		errno = EINVAL;
		return (-1);
	}

	if ((set = __cpc_eventtoset(__cpc, event, flags)) == NULL) {
		errno = EINVAL;
		return (-1);
	}

	/*
	 * The cpuver and control fields of the cpc_event_t must be saved off
	 * for later. The user may call cpc_take_sample(), expecting these to
	 * be copied into a different cpc_event_t struct by the kernel. We have
	 * to fake that behavior for CPCv1 clients.
	 */
	__cpc_v1_cpuver = event->ce_cpuver;

	ret = cpc_bind_pctx(__cpc, pctx, lwpid, set, 0);

	(void) cpc_set_destroy(__cpc, set);

	return (ret);
}

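/*
 * Sample the counters of an LWP in the controlled process.
 */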
int
cpc_pctx_take_sample(pctx_t *pctx, id_t lwpid, cpc_event_t *event)
{
	event->ce_cpuver = __cpc_v1_cpuver;

	return (__pctx_cpc(pctx, __cpc, CPC_SAMPLE, lwpid, event->ce_pic,
	    &event->ce_hrt, &CPC_TICKREG(event), CPC1_BUFSIZE));
}

/*
 * Given a process context and an lwpid, mark the CPU performance
 * counter context as invalid.
 */
int
cpc_pctx_invalidate(pctx_t *pctx, id_t lwpid)
{
	return (__pctx_cpc(pctx, __cpc, CPC_INVALIDATE, lwpid, 0, 0, 0, 0));
}

/*
 * Given a process context and an lwpid, remove all our
 * hardware context from it.
 */
int
cpc_pctx_rele(pctx_t *pctx, id_t lwpid)
{
	return (__pctx_cpc(pctx, __cpc, CPC_RELE, lwpid, 0, 0, 0, 0));
}

static cpc_errfn_t *__cpc_uerrfn;

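/*
 * Report an error either through the handler registered with
 * cpc_seterrfn() or, by default, on stderr.
 */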
/*PRINTFLIKE2*/
void
__cpc_error(const char *fn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (__cpc_uerrfn)
		__cpc_uerrfn(fn, fmt, ap);
	else {
		(void) fprintf(stderr, "libcpc: %s: ", fn);
		(void) vfprintf(stderr, fmt, ap);
	}
	va_end(ap);
}

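/*
 * Register an application error-reporting function to be used in
 * place of the default stderr output.
 */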
void
cpc_seterrfn(cpc_errfn_t *errfn)
{
	__cpc_uerrfn = errfn;
}

/*
 * cpc_version() is only for CPC1 clients.
 */
uint_t __cpc_workver = CPC_VER_1;

uint_t
cpc_version(uint_t ver)
{
	__cpc_workver = CPC_VER_1;

	switch (ver) {
	case CPC_VER_NONE:
	case CPC_VER_CURRENT:
		return (CPC_VER_CURRENT);
	case CPC_VER_1:
		/*
		 * As long as the client is using cpc_version() at all, it is
		 * a CPCv1 client.  We still allow CPCv1 clients to compile on
		 * CPCv2 systems.
		 */
		return (CPC_VER_1);
	}

	return (CPC_VER_NONE);
}
411