/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <libcpc.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <strings.h>
#include <unistd.h>
#include <stropts.h>
#include <libintl.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/processor.h>
#include <sys/procset.h>

#include "libcpc_impl.h"

#define	MASK32 0xFFFFFFFF

/*
 * The library uses the cpc_lock field of the cpc_t struct to protect access to
 * the linked lists inside the cpc_t, and only the linked lists. It is NOT used
 * to protect a user against shooting themselves in the foot (for instance, by
 * destroying the same set at the same time from different threads).
 *
 * SIGEMT needs to be blocked while holding the lock, to prevent deadlock
 * between an app holding the lock and a signal handler attempting to sample
 * or bind.
 */
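
/*
 * Illustrative sketch (not part of the library): a SIGEMT-based profiling app
 * typically samples and restarts its counters from the signal handler while
 * other threads may be calling into libcpc (e.g. cpc_set_create()) and taking
 * cpc_lock. Blocking SIGEMT around the lock is what keeps a handler like the
 * one below from deadlocking against such a thread. The globals are assumed
 * to have been set up by the application when it bound the set with overflow
 * notification enabled.
 *
 *	static cpc_t *app_cpc;
 *	static cpc_set_t *app_set;
 *	static cpc_buf_t *app_buf;
 *
 *	static void
 *	emt_handler(int sig, siginfo_t *sip, void *ctx)
 *	{
 *		(void) cpc_set_sample(app_cpc, app_set, app_buf);
 *		(void) cpc_set_restart(app_cpc, app_set);
 *	}
 */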

static char *cpc_get_list(int which, int arg);
static void cpc_err(cpc_t *cpc, const char *fn, int subcode, ...);
static int cpc_set_valid(cpc_t *cpc, cpc_set_t *set);
static int cpc_lock(cpc_t *cpc);
static void cpc_unlock(cpc_t *cpc, int blocked);
static int cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev);
static int cpc_valid_attr(cpc_t *cpc, char *attr);
static void cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx);

cpc_t *
cpc_open(int ver)
{
	cpc_t	*cpc;
	void	(*sigsaved)();
	int	error = 0;
	int	i;
	int	j;

	if (ver != CPC_VER_CURRENT) {
		/*
		 * v1 clients must stick to the v1 interface: cpc_version()
		 */
		errno = EINVAL;
		return (NULL);
	}

	/*
	 * Call the syscall with invalid parameters.  If we get ENOSYS this CPU
	 * has no CPC support.  We need to ignore SIGSYS because the syscall
	 * code will send the signal if the system call fails to load.
	 */
	sigsaved = signal(SIGSYS, SIG_IGN);
	if (syscall(SYS_cpc, -1, -1, -1, -1, -1) != -1) {
		(void) signal(SIGSYS, sigsaved);
		errno = EINVAL;
		return (NULL);
	}
	error = errno;
	(void) signal(SIGSYS, sigsaved);

	if (error != EINVAL) {
		errno = error;
		return (NULL);
	}

	if ((cpc = malloc(sizeof (cpc_t))) == NULL) {
		errno = ENOMEM;
		return (NULL);
	}

	cpc->cpc_npic = syscall(SYS_cpc, CPC_NPIC, -1, 0, 0, 0);
	cpc->cpc_caps = syscall(SYS_cpc, CPC_CAPS, -1, 0, 0, 0);

	if (syscall(SYS_cpc, CPC_IMPL_NAME, -1, &cpc->cpc_cciname, 0, 0) != 0) {
		free(cpc);
		return (NULL);
	}
	if (syscall(SYS_cpc, CPC_CPUREF, -1, &cpc->cpc_cpuref, 0, 0) != 0) {
		free(cpc);
		return (NULL);
	}

	if ((cpc->cpc_attrlist = cpc_get_list(CPC_LIST_ATTRS, 0)) == NULL) {
		free(cpc);
		return (NULL);
	}

	if ((cpc->cpc_evlist = malloc(cpc->cpc_npic * sizeof (char *))) ==
	    NULL) {
		free(cpc->cpc_attrlist);
		free(cpc);
		return (NULL);
	}

	for (i = 0; i < cpc->cpc_npic; i++) {
		if ((cpc->cpc_evlist[i] = cpc_get_list(CPC_LIST_EVENTS, i)) ==
		    NULL)
			break;
	}
	if (i != cpc->cpc_npic) {
		for (j = 0; j < i; j++)
			free(cpc->cpc_evlist[j]);
		free(cpc->cpc_evlist);
		free(cpc->cpc_attrlist);
		free(cpc);
		return (NULL);
	}

	cpc->cpc_sets = NULL;
	cpc->cpc_bufs = NULL;
	cpc->cpc_errfn = NULL;
	(void) mutex_init(&cpc->cpc_lock, USYNC_THREAD, NULL);
	__pctx_cpc_register_callback(cpc_invalidate_pctx);

	return (cpc);
}
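
/*
 * Illustrative sketch (not part of the library): the usual call sequence for
 * counting on the current LWP. Event names are platform specific, so
 * "PAPI_tot_ins" is only a placeholder; error handling is omitted for brevity.
 *
 *	cpc_t *cpc = cpc_open(CPC_VER_CURRENT);
 *	cpc_set_t *set = cpc_set_create(cpc);
 *	int idx = cpc_set_add_request(cpc, set, "PAPI_tot_ins", 0, 0, 0, NULL);
 *	cpc_buf_t *buf = cpc_buf_create(cpc, set);
 *
 *	(void) cpc_bind_curlwp(cpc, set, 0);
 *	... run the code being measured ...
 *	(void) cpc_set_sample(cpc, set, buf);
 *
 *	uint64_t val;
 *	(void) cpc_buf_get(cpc, buf, idx, &val);
 *	(void) cpc_unbind(cpc, set);
 *	(void) cpc_close(cpc);
 */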

/*
 * Ensure state is cleaned up:
 *
 * - Hardware is unbound
 * - Sets are all destroyed
 * - Bufs are all freed
 */
int
cpc_close(cpc_t *cpc)
{
	while (cpc->cpc_sets != NULL) {
		if (cpc->cpc_sets->cs_state != CS_UNBOUND)
			(void) cpc_unbind(cpc, cpc->cpc_sets);
		(void) cpc_set_destroy(cpc, cpc->cpc_sets);
	}

	while (cpc->cpc_bufs != NULL)
		(void) cpc_buf_destroy(cpc, cpc->cpc_bufs);

	free(cpc);
	return (0);
}

cpc_set_t *
cpc_set_create(cpc_t *cpc)
{
	cpc_set_t	*set;
	int		sigblocked;

	if ((set = malloc(sizeof (*set))) == NULL) {
		errno = ENOMEM;
		return (NULL);
	}

	set->cs_request = NULL;
	set->cs_nreqs	= 0;
	set->cs_state	= CS_UNBOUND;
	set->cs_fd	= -1;
	set->cs_pctx	= NULL;
	set->cs_id	= -1;
	set->cs_thr	= NULL;

	sigblocked = cpc_lock(cpc);
	set->cs_next = cpc->cpc_sets;
	cpc->cpc_sets = set;
	cpc_unlock(cpc, sigblocked);

	return (set);
}

int
cpc_set_destroy(cpc_t *cpc, cpc_set_t *set)
{
	cpc_set_t	*csp, *prev;
	cpc_request_t	*req, *next;
	int		sigblocked;

	/*
	 * Remove this set from the cpc handle's list of sets.
	 */
	sigblocked = cpc_lock(cpc);
	for (csp = prev = cpc->cpc_sets; csp != NULL; csp = csp->cs_next) {
		if (csp == set)
			break;
		prev = csp;
	}
	if (csp == NULL) {
		cpc_unlock(cpc, sigblocked);
		errno = EINVAL;
		return (-1);
	}
	if (csp == cpc->cpc_sets)
		cpc->cpc_sets = csp->cs_next;
	prev->cs_next = csp->cs_next;
	cpc_unlock(cpc, sigblocked);

	if (csp->cs_state != CS_UNBOUND)
		(void) cpc_unbind(cpc, csp);

	for (req = csp->cs_request; req != NULL; req = next) {
		next = req->cr_next;

		if (req->cr_nattrs != 0)
			free(req->cr_attr);

		free(req);
	}

	free(set);

	return (0);
}

/*ARGSUSED*/
int
cpc_set_add_request(cpc_t *cpc, cpc_set_t *set, const char *event,
    uint64_t preset, uint_t flags, uint_t nattrs, const cpc_attr_t *attrs)
{
	cpc_request_t	*req;
	const char	*fn = "cpc_set_add_request";
	int		i;
	int		npics = cpc_npic(cpc);

	if (cpc_set_valid(cpc, set) != 0 || set->cs_state != CS_UNBOUND) {
		errno = EINVAL;
		return (-1);
	}

	for (i = 0; i < npics; i++)
		if (cpc_valid_event(cpc, i, event))
			break;
	if (i == npics) {
		cpc_err(cpc, fn, CPC_INVALID_EVENT);
		errno = EINVAL;
		return (-1);
	}

	if ((req = malloc(sizeof (*req))) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	(void) strncpy(req->cr_event, event, CPC_MAX_EVENT_LEN);
	req->cr_preset = preset;
	req->cr_flags = flags;
	req->cr_nattrs = nattrs;
	req->cr_index = set->cs_nreqs;
	req->cr_attr = NULL;

	if (nattrs != 0) {
		for (i = 0; i < nattrs; i++) {
			/*
			 * Verify that each attribute name is legal and valid.
			 */
			if (attrs[i].ca_name[0] == '\0' ||
			    cpc_valid_attr(cpc, attrs[i].ca_name) == 0) {
				cpc_err(cpc, fn, CPC_INVALID_ATTRIBUTE);
				goto inval;
			}

			/*
			 * If the user requested a specific picnum, ensure that
			 * the pic can count the requested event.
			 */
			if (strncmp("picnum", attrs[i].ca_name, 8) == 0) {
				if (attrs[i].ca_val >= npics) {
					cpc_err(cpc, fn, CPC_INVALID_PICNUM);
					goto inval;
				}

				if (cpc_valid_event(cpc, attrs[i].ca_val,
				    req->cr_event) == 0) {
					cpc_err(cpc, fn, CPC_PIC_NOT_CAPABLE);
					goto inval;
				}
			}
		}

		if ((req->cr_attr = malloc(nattrs * sizeof (kcpc_attr_t)))
		    == NULL) {
			free(req);
			return (-1);
		}

		for (i = 0; i < nattrs; i++) {
			req->cr_attr[i].ka_val = attrs[i].ca_val;
			(void) strncpy(req->cr_attr[i].ka_name,
			    attrs[i].ca_name, CPC_MAX_ATTR_LEN);
		}
	} else
		req->cr_attr = NULL;

	req->cr_next = set->cs_request;
	set->cs_request = req;
	set->cs_nreqs++;

	return (req->cr_index);

inval:
	free(req);
	errno = EINVAL;
	return (-1);
}
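
/*
 * Illustrative sketch (not part of the library): pinning a request to a
 * specific counter with the "picnum" attribute checked above. The attribute
 * name "picnum" comes from the code above; the event name is a placeholder
 * and fields are assigned explicitly to avoid assuming the cpc_attr_t layout.
 *
 *	cpc_attr_t attr;
 *
 *	attr.ca_name = "picnum";
 *	attr.ca_val = 0;
 *	(void) cpc_set_add_request(cpc, set, "PAPI_tot_cyc", 0, 0, 1, &attr);
 */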

cpc_buf_t *
cpc_buf_create(cpc_t *cpc, cpc_set_t *set)
{
	cpc_buf_t	*buf;
	int		sigblocked;

	if (cpc_set_valid(cpc, set) != 0) {
		errno = EINVAL;
		return (NULL);
	}

	if ((buf = malloc(sizeof (*buf))) == NULL)
		return (NULL);

	buf->cb_size = set->cs_nreqs * sizeof (uint64_t);
	if ((buf->cb_data = malloc(buf->cb_size)) == NULL) {
		free(buf);
		return (NULL);
	}

	bzero(buf->cb_data, buf->cb_size);

	buf->cb_hrtime = 0;
	buf->cb_tick = 0;

	sigblocked = cpc_lock(cpc);
	buf->cb_next = cpc->cpc_bufs;
	cpc->cpc_bufs = buf;
	cpc_unlock(cpc, sigblocked);

	return (buf);
}

int
cpc_buf_destroy(cpc_t *cpc, cpc_buf_t *buf)
{
	cpc_buf_t	*cbp, *prev;
	int		sigblocked;

	/*
	 * Remove this buf from the cpc handle's list of bufs.
	 */
	sigblocked = cpc_lock(cpc);
	for (cbp = prev = cpc->cpc_bufs; cbp != NULL; cbp = cbp->cb_next) {
		if (cbp == buf)
			break;
		prev = cbp;
	}
	if (cbp == NULL) {
		cpc_unlock(cpc, sigblocked);
		errno = EINVAL;
		return (-1);
	}
	if (cbp == cpc->cpc_bufs)
		cpc->cpc_bufs = cbp->cb_next;
	prev->cb_next = cbp->cb_next;

	cpc_unlock(cpc, sigblocked);
	free(cbp->cb_data);
	free(cbp);

	return (0);
}

/*ARGSUSED*/
int
cpc_bind_curlwp(cpc_t *cpc, cpc_set_t *set, uint_t flags)
{
	char		*packed_set;
	size_t		packsize;
	int		ret;
	int		subcode = -1;

	/*
	 * We don't bother checking cpc_set_valid() here, because this is on
	 * the fast path of an app doing SIGEMT-based profiling, which restarts
	 * the counters from its signal handler.
	 */
	if (CPC_SET_VALID_FLAGS(flags) == 0 || set->cs_nreqs <= 0) {
		errno = EINVAL;
		return (-1);
	}

	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	ret = syscall(SYS_cpc, CPC_BIND, -1, packed_set, packsize, &subcode);
	free(packed_set);

	if (ret != 0) {
		if (subcode != -1)
			cpc_err(cpc, "cpc_bind_curlwp", subcode);
		return (-1);
	}

	set->cs_thr = thr_self();
	set->cs_state = CS_BOUND_CURLWP;
	return (ret);
}

/*ARGSUSED*/
int
cpc_bind_pctx(cpc_t *cpc, pctx_t *pctx, id_t id, cpc_set_t *set, uint_t flags)
{
	char		*packed_set;
	size_t		packsize;
	int		ret;
	int		subcode = -1;

	/*
	 * cpc_bind_pctx() currently has no valid flags.
	 */
	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
		errno = EINVAL;
		return (-1);
	}

	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	ret = __pctx_cpc(pctx, cpc, CPC_BIND, id, packed_set, (void *)packsize,
	    (void *)&subcode, -1);

	free(packed_set);

	if (ret == 0) {
		set->cs_pctx = pctx;
		set->cs_id = id;
		set->cs_state = CS_BOUND_PCTX;
	} else if (subcode != -1)
		cpc_err(cpc, "cpc_bind_pctx", subcode);

	return (ret);
}

/*ARGSUSED*/
int
cpc_bind_cpu(cpc_t *cpc, processorid_t id, cpc_set_t *set, uint_t flags)
{
	int		fd;
	char		*packed_set;
	size_t		packsize;
	__cpc_args_t	cpc_args;
	int		error;
	const char	*fn = "cpc_bind_cpu";
	int		subcode = -1;

	/*
	 * cpc_bind_cpu() currently has no valid flags.
	 */
	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
		errno = EINVAL;
		return (-1);
	}

	if (processor_bind(P_LWPID, P_MYID, id, &set->cs_obind) == -1) {
		cpc_err(cpc, fn, CPC_PBIND_FAILED);
		return (-1);
	}

	if ((fd = open(CPUDRV_SHARED, O_RDWR)) < 0) {
		error = errno;
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		errno = error;
		return (-1);
	}

	/*
	 * To avoid leaking file descriptors, if we find an existing fd here we
	 * just close it. This is only a problem if a user attempts to bind the
	 * same set to different CPUs without first unbinding it.
	 */
	if (set->cs_fd != -1)
		(void) close(set->cs_fd);
	set->cs_fd = fd;

	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		(void) close(fd);
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		errno = ENOMEM;
		return (-1);
	}

	cpc_args.udata1 = packed_set;
	cpc_args.udata2 = (void *)packsize;
	cpc_args.udata3 = (void *)&subcode;

	if (ioctl(fd, CPCIO_BIND, &cpc_args) != 0) {
		error = errno;
		free(packed_set);
		(void) close(fd);
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		if (subcode != -1)
			cpc_err(cpc, fn, subcode);
		errno = error;
		return (-1);
	}

	free(packed_set);

	set->cs_thr = thr_self();
	set->cs_state = CS_BOUND_CPU;

	return (0);
}

/*ARGSUSED*/
int
cpc_request_preset(cpc_t *cpc, int index, uint64_t preset)
{
	return (syscall(SYS_cpc, CPC_PRESET, -1, index,
	    (uint32_t)(preset >> 32), (uint32_t)(preset & MASK32)));
}

/*ARGSUSED*/
int
cpc_set_restart(cpc_t *cpc, cpc_set_t *set)
{
	return (syscall(SYS_cpc, CPC_RESTART, -1, 0, 0, 0));
}

/*ARGSUSED*/
int
cpc_unbind(cpc_t *cpc, cpc_set_t *set)
{
	int		ret = 0;
	int		error;

	if (cpc_set_valid(cpc, set) != 0) {
		errno = EINVAL;
		return (-1);
	}

	switch (set->cs_state) {
	case CS_UNBOUND:
		errno = EINVAL;
		return (-1);
	case CS_BOUND_CURLWP:
		ret = syscall(SYS_cpc, CPC_RELE, -1, 0, 0, 0);
		error = errno;
		break;
	case CS_BOUND_CPU:
		ret = ioctl(set->cs_fd, CPCIO_RELE, NULL);
		error = errno;
		(void) close(set->cs_fd);
		set->cs_fd = -1;
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		break;
	case CS_BOUND_PCTX:
		if (set->cs_pctx != NULL) {
			ret = __pctx_cpc(set->cs_pctx, cpc, CPC_RELE,
			    set->cs_id, 0, 0, 0, 0);
			error = errno;
		}
		break;
	}

	set->cs_thr = NULL;
	set->cs_id = -1;
	set->cs_state = CS_UNBOUND;
	if (ret != 0)
		errno = error;
	return (ret);
}

/*ARGSUSED*/
int
cpc_set_sample(cpc_t *cpc, cpc_set_t *set, cpc_buf_t *buf)
{
	__cpc_args_t args;

	/*
	 * The following check ensures that only the most recently bound set
	 * can be sampled, as binding a set invalidates all other sets in the
	 * cpc_t.
	 */
	if (set->cs_state == CS_UNBOUND ||
	    buf->cb_size != set->cs_nreqs * sizeof (uint64_t)) {
		errno = EINVAL;
		return (-1);
	}

	switch (set->cs_state) {
	case CS_BOUND_CURLWP:
		return (syscall(SYS_cpc, CPC_SAMPLE, -1, buf->cb_data,
		    &buf->cb_hrtime, &buf->cb_tick));
	case CS_BOUND_CPU:
		args.udata1 = buf->cb_data;
		args.udata2 = &buf->cb_hrtime;
		args.udata3 = &buf->cb_tick;
		return (ioctl(set->cs_fd, CPCIO_SAMPLE, &args));
	case CS_BOUND_PCTX:
		return (__pctx_cpc(set->cs_pctx, cpc, CPC_SAMPLE, set->cs_id,
		    buf->cb_data, &buf->cb_hrtime, &buf->cb_tick,
		    buf->cb_size));
	}

	errno = EINVAL;
	return (-1);
}

/*ARGSUSED*/
void
cpc_buf_sub(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
{
	int i;

	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
		return;

	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
	    a->cb_hrtime : b->cb_hrtime;
	ds->cb_tick = a->cb_tick - b->cb_tick;

	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
		ds->cb_data[i] = a->cb_data[i] - b->cb_data[i];
}

/*ARGSUSED*/
void
cpc_buf_add(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
{
	int i;

	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
		return;

	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
	    a->cb_hrtime : b->cb_hrtime;
	ds->cb_tick = a->cb_tick + b->cb_tick;

	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
		ds->cb_data[i] = a->cb_data[i] + b->cb_data[i];
}

/*ARGSUSED*/
void
cpc_buf_copy(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *src)
{
	if (ds->cb_size != src->cb_size)
		return;

	bcopy(src->cb_data, ds->cb_data, ds->cb_size);
	ds->cb_hrtime = src->cb_hrtime;
	ds->cb_tick = src->cb_tick;
}

/*ARGSUSED*/
void
cpc_buf_zero(cpc_t *cpc, cpc_buf_t *buf)
{
	bzero(buf->cb_data, buf->cb_size);
	buf->cb_hrtime = 0;
	buf->cb_tick = 0;
}

/*
 * Gets or sets the value of the request specified by index.
 */
/*ARGSUSED*/
int
cpc_buf_get(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t *val)
{
	*val = buf->cb_data[index];

	return (0);
}

/*ARGSUSED*/
int
cpc_buf_set(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t val)
{
	buf->cb_data[index] = val;

	return (0);
}

/*ARGSUSED*/
hrtime_t
cpc_buf_hrtime(cpc_t *cpc, cpc_buf_t *buf)
{
	return (buf->cb_hrtime);
}

/*ARGSUSED*/
uint64_t
cpc_buf_tick(cpc_t *cpc, cpc_buf_t *buf)
{
	return (buf->cb_tick);
}
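
/*
 * Illustrative sketch (not part of the library): measuring an interval with
 * the buffer helpers above. "before", "after" and "delta" are buffers the
 * application created with cpc_buf_create() for a bound set; error handling
 * is omitted.
 *
 *	(void) cpc_set_sample(cpc, set, before);
 *	... run the code being measured ...
 *	(void) cpc_set_sample(cpc, set, after);
 *
 *	cpc_buf_sub(cpc, delta, after, before);
 *	(void) cpc_buf_get(cpc, delta, idx, &val);
 *	elapsed = cpc_buf_hrtime(cpc, after) - cpc_buf_hrtime(cpc, before);
 */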

static char *
cpc_get_list(int which, int arg)
{
	int	szcmd;
	int	size;
	char	*list;

	if (which == CPC_LIST_ATTRS)
		szcmd = CPC_ATTRLIST_SIZE;
	else
		szcmd = CPC_EVLIST_SIZE;

	if (syscall(SYS_cpc, szcmd, -1, &size, arg, 0) != 0)
		return (NULL);

	if ((list = malloc(size)) == NULL)
		return (NULL);

	if (syscall(SYS_cpc, which, -1, list, arg, 0) != 0) {
		free(list);
		return (NULL);
	}

	return (list);
}

/*ARGSUSED*/
void
cpc_walk_requests(cpc_t *cpc, cpc_set_t *set, void *arg,
    void (*action)(void *arg, int index, const char *event, uint64_t preset,
	uint_t flags, int nattrs, const cpc_attr_t *attrs))
{
	cpc_request_t	*rp;
	cpc_attr_t	*attrs = NULL;
	int		i;

	for (rp = set->cs_request; rp != NULL; rp = rp->cr_next) {
		/*
		 * Need to reconstruct a temporary cpc_attr_t array for req.
		 */
		if (rp->cr_nattrs != 0)
			if ((attrs = malloc(rp->cr_nattrs *
			    sizeof (cpc_attr_t))) == NULL)
				return;
		for (i = 0; i < rp->cr_nattrs; i++) {
			attrs[i].ca_name = rp->cr_attr[i].ka_name;
			attrs[i].ca_val = rp->cr_attr[i].ka_val;
		}

		action(arg, rp->cr_index, rp->cr_event, rp->cr_preset,
		    rp->cr_flags, rp->cr_nattrs, attrs);

		if (rp->cr_nattrs != 0)
			free(attrs);
	}
}

/*ARGSUSED*/
static void
cpc_walk_events_impl(cpc_t *cpc, void *arg,
    void (*action)(void *arg, const char *event), int is_generic)
{
	char		**list;
	char		*p, *e;
	int		i;
	int		is_papi;
	int		ncounters = cpc_npic(cpc);
	cpc_strhash_t	*hash;

	if ((list = malloc(ncounters * sizeof (char *))) == NULL)
		return;

	/*
	 * Zero the array so the cleanup path below can safely free() entries
	 * that were never filled in.
	 */
	bzero(list, ncounters * sizeof (char *));

	if ((hash = __cpc_strhash_alloc()) == NULL) {
		free(list);
		return;
	}

	for (i = 0; i < ncounters; i++) {
		if ((list[i] = strdup(cpc->cpc_evlist[i])) == NULL)
			goto err;
		p = list[i];
		while ((e = strchr(p, ',')) != NULL) {
			*e = '\0';

			/*
			 * Based on is_generic flag, skip appropriate
			 * event names.
			 */
			is_papi = (strncmp(p, "PAPI", 4) == 0);
			if (is_generic != is_papi) {
				p = e + 1;
				continue;
			}

			if (__cpc_strhash_add(hash, p) == -1)
				goto err;

			p = e + 1;
		}

		is_papi = (strncmp(p, "PAPI", 4) == 0);
		if (is_generic == is_papi) {
			if (__cpc_strhash_add(hash, p) == -1)
				goto err;
		}
	}

	while ((p = __cpc_strhash_next(hash)) != NULL)
		action(arg, p);

err:
	__cpc_strhash_free(hash);
	for (i = 0; i < ncounters; i++)
		free(list[i]);
	free(list);
}

/*ARGSUSED*/
void
cpc_walk_events_all(cpc_t *cpc, void *arg,
    void (*action)(void *arg, const char *event))
{
	cpc_walk_events_impl(cpc, arg, action, 0);
}

/*ARGSUSED*/
void
cpc_walk_generic_events_all(cpc_t *cpc, void *arg,
    void (*action)(void *arg, const char *event))
{
	cpc_walk_events_impl(cpc, arg, action, 1);
}

/*ARGSUSED*/
static void
cpc_walk_events_pic_impl(cpc_t *cpc, uint_t picno, void *arg,
    void (*action)(void *arg, uint_t picno, const char *event), int is_generic)
{
	char	*p;
	char	*e;
	char	*list;
	int	is_papi;

	if (picno >= cpc->cpc_npic) {
		errno = EINVAL;
		return;
	}

	if ((list = strdup(cpc->cpc_evlist[picno])) == NULL)
		return;

	/*
	 * List now points to a comma-separated list of events supported by
	 * the designated pic.
	 */
	p = list;
	while ((e = strchr(p, ',')) != NULL) {
		*e = '\0';

		/*
		 * Based on is_generic flag, skip appropriate
		 * event names.
		 */
		is_papi = (strncmp(p, "PAPI", 4) == 0);
		if (is_generic != is_papi) {
			p = e + 1;
			continue;
		}

		action(arg, picno, p);
		p = e + 1;
	}

	is_papi = (strncmp(p, "PAPI", 4) == 0);
	if (is_generic == is_papi)
		action(arg, picno, p);

	free(list);
}

/*ARGSUSED*/
void
cpc_walk_events_pic(cpc_t *cpc, uint_t picno, void *arg,
    void (*action)(void *arg, uint_t picno, const char *event))
{
	cpc_walk_events_pic_impl(cpc, picno, arg, action, 0);
}

/*ARGSUSED*/
void
cpc_walk_generic_events_pic(cpc_t *cpc, uint_t picno, void *arg,
    void (*action)(void *arg, uint_t picno, const char *event))
{
	cpc_walk_events_pic_impl(cpc, picno, arg, action, 1);
}
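
/*
 * Illustrative sketch (not part of the library): a walker callback that
 * prints every event the given pic can count, in the style of ev_walker()
 * below. "print_event" is an arbitrary application name.
 *
 *	static void
 *	print_event(void *arg, uint_t picno, const char *event)
 *	{
 *		(void) printf("pic%u: %s\n", picno, event);
 *	}
 *
 *	cpc_walk_events_pic(cpc, picno, NULL, print_event);
 */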

/*ARGSUSED*/
void
cpc_walk_attrs(cpc_t *cpc, void *arg,
    void (*action)(void *arg, const char *attr))
{
	char	*p;
	char	*e;
	char	*list;

	if ((list = strdup(cpc->cpc_attrlist)) == NULL)
		return;

	/*
	 * Platforms with no attributes will return an empty string.
	 */
	if (*list == '\0') {
		free(list);
		return;
	}

	/*
	 * List now points to a comma-separated list of attributes supported by
	 * the underlying platform.
	 */
	p = list;
	while ((e = strchr(p, ',')) != NULL) {
		*e = '\0';
		action(arg, p);
		p = e + 1;
	}
	action(arg, p);

	free(list);
}

/*ARGSUSED*/
int
cpc_enable(cpc_t *cpc)
{
	return (syscall(SYS_cpc, CPC_ENABLE, -1, 0, 0, 0));
}

/*ARGSUSED*/
int
cpc_disable(cpc_t *cpc)
{
	return (syscall(SYS_cpc, CPC_DISABLE, -1, 0, 0, 0));
}

/*ARGSUSED*/
uint_t
cpc_npic(cpc_t *cpc)
{
	return (cpc->cpc_npic);
}

/*ARGSUSED*/
uint_t
cpc_caps(cpc_t *cpc)
{
	return (cpc->cpc_caps);
}

const char *
cpc_cciname(cpc_t *cpc)
{
	return (cpc->cpc_cciname);
}

const char *
cpc_cpuref(cpc_t *cpc)
{
	return (cpc->cpc_cpuref);
}

int
cpc_seterrhndlr(cpc_t *cpc, cpc_errhndlr_t *fn)
{
	cpc->cpc_errfn = fn;
	return (0);
}
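
/*
 * Illustrative sketch (not part of the library): an application-supplied
 * error handler. cpc_err() below invokes it as errfn(fn, subcode, fmt, ap),
 * so a handler has the shape sketched here; the exact cpc_errhndlr_t typedef
 * lives in <libcpc.h>. "my_errfn" is an arbitrary application name.
 *
 *	static void
 *	my_errfn(const char *fn, int subcode, const char *fmt, va_list ap)
 *	{
 *		(void) fprintf(stderr, "myapp: %s: ", fn);
 *		(void) vfprintf(stderr, fmt, ap);
 *	}
 *
 *	(void) cpc_seterrhndlr(cpc, my_errfn);
 */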

/*
 * These strings may contain printf() conversion specifiers.
 */
static const char *errstr[] = {
"",						/* zero slot filler */
"Unknown event\n",				/* CPC_INVALID_EVENT */
"Invalid counter number\n",			/* CPC_INVALID_PICNUM */
"Unknown attribute\n",				/* CPC_INVALID_ATTRIBUTE */
"Attribute out of range\n",			/* CPC_ATTRIBUTE_OUT_OF_RANGE */
"Hardware resource unavailable\n",		/* CPC_RESOURCE_UNAVAIL */
"Counter cannot count requested event\n",	/* CPC_PIC_NOT_CAPABLE */
"Invalid flags in a request\n",			/* CPC_REQ_INVALID_FLAGS */
"Requests conflict with each other\n",		/* CPC_CONFLICTING_REQS */
"Attribute requires the cpc_cpu privilege\n",	/* CPC_ATTR_REQUIRES_PRIVILEGE */
"Couldn't bind LWP to requested processor\n",	/* CPC_PBIND_FAILED */
"Hypervisor event access denied\n"		/* CPC_HV_NO_ACCESS */
};

/*VARARGS3*/
static void
cpc_err(cpc_t *cpc, const char *fn, int subcode, ...)
{
	va_list		ap;
	const char	*str;
	int		error;

	/*
	 * If subcode is -1, there is no specific description for this error.
	 */
	if (subcode == -1)
		return;

	/*
	 * We need to preserve errno across calls to this function to prevent
	 * it from being clobbered while here, or in the user's error handler.
	 */
	error = errno;

	str = dgettext(TEXT_DOMAIN, errstr[subcode]);

	va_start(ap, subcode);
	if (cpc->cpc_errfn != NULL)
		cpc->cpc_errfn(fn, subcode, str, ap);
	else {
		/*
		 * If printf() conversion specifiers are added to the errstr[]
		 * table, this call needs to be changed to vfprintf().
		 */
		(void) fprintf(stderr, "libcpc: %s: %s", fn, str);
	}
	va_end(ap);

	errno = error;
}

/*
 * Hook used by libpctx to alert libcpc when a pctx handle is going away.
 * Because libcpc caches pctx_t pointers in its sets, it must be notified when
 * one is destroyed so that it never attempts a libpctx operation on a stale,
 * invalid handle.
 */
static void
cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx)
{
	cpc_set_t	*set;
	int		sigblocked;

	sigblocked = cpc_lock(cpc);
	for (set = cpc->cpc_sets; set != NULL; set = set->cs_next)
		if (set->cs_pctx == pctx)
			set->cs_pctx = NULL;
	cpc_unlock(cpc, sigblocked);
}

/*
 * Check that the set is valid; if so it will be in the cpc handle's
 * list of sets. The lock protects the list of sets, but not the set
 * itself.
 */
static int
cpc_set_valid(cpc_t *cpc, cpc_set_t *set)
{
	cpc_set_t	*csp;
	int		sigblocked;

	sigblocked = cpc_lock(cpc);
	for (csp = cpc->cpc_sets; csp != NULL; csp = csp->cs_next)
		if (csp == set)
			break;
	cpc_unlock(cpc, sigblocked);
	if (csp == NULL)
		return (-1);
	return (0);
}

static int
cpc_lock(cpc_t *cpc)
{
	int ret = (sigset(SIGEMT, SIG_HOLD) == SIG_HOLD);
	(void) mutex_lock(&cpc->cpc_lock);
	return (ret);
}

static void
cpc_unlock(cpc_t *cpc, int sigblocked)
{
	(void) mutex_unlock(&cpc->cpc_lock);
	if (sigblocked == 0)
		(void) sigrelse(SIGEMT);
}

struct priv {
	const char *name;
	int found;
};

/*ARGSUSED*/
static void
ev_walker(void *arg, uint_t picno, const char *ev)
{
	if (strcmp(((struct priv *)arg)->name, ev) == 0)
		((struct priv *)arg)->found = 1;
}

static void
at_walker(void *arg, const char *at)
{
	if (strcmp(((struct priv *)arg)->name, at) == 0)
		((struct priv *)arg)->found = 1;
}

static int
cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev)
{
	struct priv pr = { NULL, 0 };
	char *end_ev;
	int err;

	pr.name = ev;
	cpc_walk_events_pic(cpc, pic, &pr, ev_walker);
	if (pr.found)
		return (1);

	cpc_walk_generic_events_pic(cpc, pic, &pr, ev_walker);
	if (pr.found)
		return (1);

	/*
	 * Before assuming this is an invalid event, see if we have been given
	 * a raw event code.  Check the end pointer returned via strtol()'s
	 * second argument so that invalid events that merely begin with a
	 * number are not accepted.
	 */
	err = errno;
	errno = 0;
	(void) strtol(ev, &end_ev, 0);
	if ((errno == 0) && (*end_ev == '\0')) {
		/*
		 * Success - this is a valid raw code in hex, decimal, or octal.
		 */
		errno = err;
		return (1);
	}

	errno = err;
	return (0);
}

static int
cpc_valid_attr(cpc_t *cpc, char *attr)
{
	struct priv pr = { NULL, 0 };

	pr.name = attr;
	cpc_walk_attrs(cpc, &pr, at_walker);
	return (pr.found);
}
1182