/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <libcpc.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <strings.h>
#include <unistd.h>
#include <stropts.h>
#include <libintl.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/processor.h>
#include <sys/procset.h>

#include "libcpc_impl.h"

#define	MASK32 0xFFFFFFFF

/*
 * The library uses the cpc_lock field of the cpc_t struct to protect access to
 * the linked lists inside the cpc_t, and only the linked lists. It is NOT used
 * to protect users from shooting themselves in the foot (for instance, by
 * destroying the same set at the same time from different threads).
 *
 * SIGEMT must be blocked while the lock is held, to prevent a deadlock between
 * an app holding the lock and a signal handler attempting to sample or bind.
 */
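
/*
 * Illustrative sketch only (not compiled): every entry point below that
 * touches the cpc_t's linked lists follows the same pattern, built on the
 * cpc_lock()/cpc_unlock() helpers defined at the end of this file, so that
 * SIGEMT is held for exactly as long as the mutex is:
 *
 *	sigblocked = cpc_lock(cpc);
 *	(walk or modify cpc->cpc_sets and/or cpc->cpc_bufs)
 *	cpc_unlock(cpc, sigblocked);
 */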

static char *cpc_get_list(int which, int arg);
static void cpc_err(cpc_t *cpc, const char *fn, int subcode, ...);
static int cpc_set_valid(cpc_t *cpc, cpc_set_t *set);
static int cpc_lock(cpc_t *cpc);
static void cpc_unlock(cpc_t *cpc, int blocked);
static int cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev);
static int cpc_valid_attr(cpc_t *cpc, char *attr);
static void cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx);

cpc_t *
cpc_open(int ver)
{
	cpc_t	*cpc;
	void	(*sigsaved)();
	int	error = 0;
	int	i;
	int	j;

	if (ver != CPC_VER_CURRENT) {
		/*
		 * v1 clients must stick to the v1 interface: cpc_version()
		 */
		errno = EINVAL;
		return (NULL);
	}

	/*
	 * Call the syscall with invalid parameters.  If we get ENOSYS, this
	 * CPU has no CPC support.  We must ignore SIGSYS here because the
	 * syscall code will send that signal if the system call fails to load.
	 */
	sigsaved = signal(SIGSYS, SIG_IGN);
	if (syscall(SYS_cpc, -1, -1, -1, -1, -1) != -1) {
		(void) signal(SIGSYS, sigsaved);
		errno = EINVAL;
		return (NULL);
	}
	error = errno;
	(void) signal(SIGSYS, sigsaved);

	if (error != EINVAL) {
		errno = error;
		return (NULL);
	}

	if ((cpc = malloc(sizeof (cpc_t))) == NULL) {
		errno = ENOMEM;
		return (NULL);
	}

	cpc->cpc_npic = syscall(SYS_cpc, CPC_NPIC, -1, 0, 0, 0);
	cpc->cpc_caps = syscall(SYS_cpc, CPC_CAPS, -1, 0, 0, 0);

	if (syscall(SYS_cpc, CPC_IMPL_NAME, -1, &cpc->cpc_cciname, 0, 0) != 0) {
		free(cpc);
		return (NULL);
	}
	if (syscall(SYS_cpc, CPC_CPUREF, -1, &cpc->cpc_cpuref, 0, 0) != 0) {
		free(cpc);
		return (NULL);
	}

	if ((cpc->cpc_attrlist = cpc_get_list(CPC_LIST_ATTRS, 0)) == NULL) {
		free(cpc);
		return (NULL);
	}

	if ((cpc->cpc_evlist = malloc(cpc->cpc_npic * sizeof (char *))) ==
	    NULL) {
		free(cpc->cpc_attrlist);
		free(cpc);
		return (NULL);
	}

	for (i = 0; i < cpc->cpc_npic; i++) {
		if ((cpc->cpc_evlist[i] = cpc_get_list(CPC_LIST_EVENTS, i)) ==
		    NULL)
			break;
	}
	if (i != cpc->cpc_npic) {
		for (j = 0; j < i; j++)
			free(cpc->cpc_evlist[j]);
		free(cpc->cpc_evlist);
		free(cpc->cpc_attrlist);
		free(cpc);
		return (NULL);
	}

	cpc->cpc_sets = NULL;
	cpc->cpc_bufs = NULL;
	cpc->cpc_errfn = NULL;
	(void) mutex_init(&cpc->cpc_lock, USYNC_THREAD, NULL);
	__pctx_cpc_register_callback(cpc_invalidate_pctx);

	return (cpc);
}
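
/*
 * Example (illustrative sketch, not part of the library): the typical life
 * cycle of the handle returned by cpc_open(), using only interfaces defined
 * in this file. The event name is a placeholder; real names are platform
 * specific and can be discovered with the cpc_walk_events_*() functions
 * below. Error checking is omitted for brevity.
 *
 *	cpc_t *cpc = cpc_open(CPC_VER_CURRENT);
 *	cpc_set_t *set = cpc_set_create(cpc);
 *	int ix = cpc_set_add_request(cpc, set, "PAPI_tot_ins", 0,
 *	    CPC_COUNT_USER, 0, NULL);
 *	cpc_buf_t *buf = cpc_buf_create(cpc, set);
 *	uint64_t val;
 *
 *	(void) cpc_bind_curlwp(cpc, set, 0);
 *	(run the code to be measured)
 *	(void) cpc_set_sample(cpc, set, buf);
 *	(void) cpc_unbind(cpc, set);
 *	(void) cpc_buf_get(cpc, buf, ix, &val);
 *
 *	(void) cpc_buf_destroy(cpc, buf);
 *	(void) cpc_set_destroy(cpc, set);
 *	(void) cpc_close(cpc);
 */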

/*
 * Ensure state is cleaned up:
 *
 * - Hardware is unbound
 * - Sets are all destroyed
 * - Bufs are all freed
 */
int
cpc_close(cpc_t *cpc)
{
	while (cpc->cpc_sets != NULL) {
		if (cpc->cpc_sets->cs_state != CS_UNBOUND)
			(void) cpc_unbind(cpc, cpc->cpc_sets);
		(void) cpc_set_destroy(cpc, cpc->cpc_sets);
	}

	while (cpc->cpc_bufs != NULL)
		(void) cpc_buf_destroy(cpc, cpc->cpc_bufs);

	free(cpc);
	return (0);
}

/*
 * Terminate everything that runs in pctx_run
 */
void
cpc_terminate(cpc_t *cpc)
{
	cpc_set_t	*csp;
	int		sigblocked;

	sigblocked = cpc_lock(cpc);
	for (csp = cpc->cpc_sets; csp != NULL; csp = csp->cs_next) {
		if (csp->cs_pctx != NULL)
			pctx_terminate(csp->cs_pctx);
	}
	cpc_unlock(cpc, sigblocked);
}

cpc_set_t *
cpc_set_create(cpc_t *cpc)
{
	cpc_set_t	*set;
	int		sigblocked;

	if ((set = malloc(sizeof (*set))) == NULL) {
		errno = ENOMEM;
		return (NULL);
	}

	set->cs_request = NULL;
	set->cs_nreqs	= 0;
	set->cs_state	= CS_UNBOUND;
	set->cs_fd	= -1;
	set->cs_pctx	= NULL;
	set->cs_id	= -1;
	set->cs_thr	= NULL;

	sigblocked = cpc_lock(cpc);
	set->cs_next = cpc->cpc_sets;
	cpc->cpc_sets = set;
	cpc_unlock(cpc, sigblocked);

	return (set);
}

int
cpc_set_destroy(cpc_t *cpc, cpc_set_t *set)
{
	cpc_set_t	*csp, *prev;
	cpc_request_t	*req, *next;
	int		sigblocked;

	/*
	 * Remove this set from the cpc handle's list of sets.
	 */
	sigblocked = cpc_lock(cpc);
	for (csp = prev = cpc->cpc_sets; csp != NULL; csp = csp->cs_next) {
		if (csp == set)
			break;
		prev = csp;
	}
	if (csp == NULL) {
		cpc_unlock(cpc, sigblocked);
		errno = EINVAL;
		return (-1);
	}
	if (csp == cpc->cpc_sets)
		cpc->cpc_sets = csp->cs_next;
	prev->cs_next = csp->cs_next;
	cpc_unlock(cpc, sigblocked);

	if (csp->cs_state != CS_UNBOUND)
		(void) cpc_unbind(cpc, csp);

	/*
	 * Detach from the process
	 */
	if (csp->cs_pctx != NULL) {
		pctx_release(csp->cs_pctx);
		csp->cs_pctx = NULL;
	}

	for (req = csp->cs_request; req != NULL; req = next) {
		next = req->cr_next;

		if (req->cr_nattrs != 0)
			free(req->cr_attr);

		free(req);
	}

	free(set);

	return (0);
}

/*ARGSUSED*/
int
cpc_set_add_request(cpc_t *cpc, cpc_set_t *set, const char *event,
    uint64_t preset, uint_t flags, uint_t nattrs, const cpc_attr_t *attrs)
{
	cpc_request_t	*req;
	const char	*fn = "cpc_set_add_request";
	int		i;
	int		npics = cpc_npic(cpc);

	if (cpc_set_valid(cpc, set) != 0 || set->cs_state != CS_UNBOUND) {
		errno = EINVAL;
		return (-1);
	}

	for (i = 0; i < npics; i++)
		if (cpc_valid_event(cpc, i, event))
			break;
	if (i == npics) {
		cpc_err(cpc, fn, CPC_INVALID_EVENT);
		errno = EINVAL;
		return (-1);
	}

	if ((req = malloc(sizeof (*req))) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	(void) strncpy(req->cr_event, event, CPC_MAX_EVENT_LEN);
	req->cr_preset = preset;
	req->cr_flags = flags;
	req->cr_nattrs = nattrs;
	req->cr_index = set->cs_nreqs;
	req->cr_attr = NULL;

	if (nattrs != 0) {
		for (i = 0; i < nattrs; i++) {
			/*
			 * Verify that each attribute name is legal and valid.
			 */
			if (attrs[i].ca_name[0] == '\0' ||
			    cpc_valid_attr(cpc, attrs[i].ca_name) == 0) {
				cpc_err(cpc, fn, CPC_INVALID_ATTRIBUTE);
				goto inval;
			}

			/*
			 * If the user requested a specific picnum, ensure that
			 * the pic can count the requested event.
			 */
			if (strncmp("picnum", attrs[i].ca_name, 8) == 0) {
				if (attrs[i].ca_val >= npics) {
					cpc_err(cpc, fn, CPC_INVALID_PICNUM);
					goto inval;
				}

				if (cpc_valid_event(cpc, attrs[i].ca_val,
				    req->cr_event) == 0) {
					cpc_err(cpc, fn, CPC_PIC_NOT_CAPABLE);
					goto inval;
				}
			}
		}

		if ((req->cr_attr = malloc(nattrs * sizeof (kcpc_attr_t)))
		    == NULL) {
			free(req);
			errno = ENOMEM;
			return (-1);
		}

		for (i = 0; i < nattrs; i++) {
			req->cr_attr[i].ka_val = attrs[i].ca_val;
			(void) strncpy(req->cr_attr[i].ka_name,
			    attrs[i].ca_name, CPC_MAX_ATTR_LEN);
		}
	} else
		req->cr_attr = NULL;

	req->cr_next = set->cs_request;
	set->cs_request = req;
	set->cs_nreqs++;

	return (req->cr_index);

inval:
	free(req);
	errno = EINVAL;
	return (-1);
}
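
/*
 * Example (illustrative sketch): constraining a request to a particular
 * counter with the "picnum" attribute validated above. The event name and
 * counter number are placeholders.
 *
 *	cpc_attr_t attr = { "picnum", 1 };
 *
 *	(void) cpc_set_add_request(cpc, set, "PAPI_tot_ins", 0,
 *	    CPC_COUNT_USER, 1, &attr);
 */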

cpc_buf_t *
cpc_buf_create(cpc_t *cpc, cpc_set_t *set)
{
	cpc_buf_t	*buf;
	int		sigblocked;

	if (cpc_set_valid(cpc, set) != 0) {
		errno = EINVAL;
		return (NULL);
	}

	if ((buf = malloc(sizeof (*buf))) == NULL)
		return (NULL);

	buf->cb_size = set->cs_nreqs * sizeof (uint64_t);
	if ((buf->cb_data = malloc(buf->cb_size)) == NULL) {
		free(buf);
		return (NULL);
	}

	bzero(buf->cb_data, buf->cb_size);

	buf->cb_hrtime = 0;
	buf->cb_tick = 0;

	sigblocked = cpc_lock(cpc);
	buf->cb_next = cpc->cpc_bufs;
	cpc->cpc_bufs = buf;
	cpc_unlock(cpc, sigblocked);

	return (buf);
}

int
cpc_buf_destroy(cpc_t *cpc, cpc_buf_t *buf)
{
	cpc_buf_t	*cbp, *prev;
	int		sigblocked;

	/*
	 * Remove this buf from the cpc handle's list of bufs.
	 */
	sigblocked = cpc_lock(cpc);
	for (cbp = prev = cpc->cpc_bufs; cbp != NULL; cbp = cbp->cb_next) {
		if (cbp == buf)
			break;
		prev = cbp;
	}
	if (cbp == NULL) {
		cpc_unlock(cpc, sigblocked);
		errno = EINVAL;
		return (-1);
	}
	if (cbp == cpc->cpc_bufs)
		cpc->cpc_bufs = cbp->cb_next;
	prev->cb_next = cbp->cb_next;

	cpc_unlock(cpc, sigblocked);
	free(cbp->cb_data);
	free(cbp);

	return (0);
}

/*ARGSUSED*/
int
cpc_bind_curlwp(cpc_t *cpc, cpc_set_t *set, uint_t flags)
{
	char		*packed_set;
	size_t		packsize;
	int		ret;
	int		subcode = -1;

	/*
	 * We don't bother checking cpc_set_valid() here, because this is in the
	 * fast path of an app doing SIGEMT-based profiling as it restarts the
	 * counters from its signal handler.
	 */
	if (CPC_SET_VALID_FLAGS(flags) == 0 || set->cs_nreqs <= 0) {
		errno = EINVAL;
		return (-1);
	}

	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	ret = syscall(SYS_cpc, CPC_BIND, -1, packed_set, packsize, &subcode);
	free(packed_set);

	if (ret != 0) {
		if (subcode != -1)
			cpc_err(cpc, "cpc_bind_curlwp", subcode);
		return (-1);
	}

	set->cs_thr = thr_self();
	set->cs_state = CS_BOUND_CURLWP;
	return (ret);
}

/*ARGSUSED*/
int
cpc_bind_pctx(cpc_t *cpc, pctx_t *pctx, id_t id, cpc_set_t *set, uint_t flags)
{
	char		*packed_set;
	size_t		packsize;
	int		ret;
	int		subcode = -1;

	/*
	 * cpc_bind_pctx() currently has no valid flags.
	 */
	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
		errno = EINVAL;
		return (-1);
	}

	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	ret = __pctx_cpc(pctx, cpc, CPC_BIND, id, packed_set, (void *)packsize,
	    (void *)&subcode, -1);

	free(packed_set);

	if (ret == 0) {
		set->cs_pctx = pctx;
		set->cs_id = id;
		set->cs_state = CS_BOUND_PCTX;
	} else if (subcode != -1)
		cpc_err(cpc, "cpc_bind_pctx", subcode);

	return (ret);
}

/*ARGSUSED*/
int
cpc_bind_cpu(cpc_t *cpc, processorid_t id, cpc_set_t *set, uint_t flags)
{
	int		fd;
	char		*packed_set;
	size_t		packsize;
	__cpc_args_t	cpc_args;
	int		error;
	const char	*fn = "cpc_bind_cpu";
	int		subcode = -1;

	/*
	 * cpc_bind_cpu() currently has no valid flags.
	 */
	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
		errno = EINVAL;
		return (-1);
	}

	if (processor_bind(P_LWPID, P_MYID, id, &set->cs_obind) == -1) {
		cpc_err(cpc, fn, CPC_PBIND_FAILED);
		return (-1);
	}

	if ((fd = open(CPUDRV_SHARED, O_RDWR)) < 0) {
		error = errno;
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		errno = error;
		return (-1);
	}

	/*
	 * To avoid leaking file descriptors, if we find an existing fd here we
	 * just close it. This is only a problem if a user attempts to bind the
	 * same set to different CPUs without first unbinding it.
	 */
	if (set->cs_fd != -1)
		(void) close(set->cs_fd);
	set->cs_fd = fd;

	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		(void) close(fd);
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		errno = ENOMEM;
		return (-1);
	}

	cpc_args.udata1 = packed_set;
	cpc_args.udata2 = (void *)packsize;
	cpc_args.udata3 = (void *)&subcode;

	if (ioctl(fd, CPCIO_BIND, &cpc_args) != 0) {
		error = errno;
		free(packed_set);
		(void) close(fd);
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		if (subcode != -1)
			cpc_err(cpc, fn, subcode);
		errno = error;
		return (-1);
	}

	free(packed_set);

	set->cs_thr = thr_self();
	set->cs_state = CS_BOUND_CPU;

	return (0);
}

/*ARGSUSED*/
int
cpc_request_preset(cpc_t *cpc, int index, uint64_t preset)
{
	return (syscall(SYS_cpc, CPC_PRESET, -1, index,
	    (uint32_t)(preset >> 32), (uint32_t)(preset & MASK32)));
}

/*ARGSUSED*/
int
cpc_set_restart(cpc_t *cpc, cpc_set_t *set)
{
	return (syscall(SYS_cpc, CPC_RESTART, -1, 0, 0, 0));
}
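
/*
 * Example (illustrative sketch): the SIGEMT-based profiling arrangement that
 * the comments in cpc_bind_curlwp() above refer to. A request created with an
 * overflow-notification flag (CPC_OVF_NOTIFY_EMT; named here as an assumption,
 * see cpc_set_add_request(3CPC)) delivers SIGEMT on counter overflow; the
 * handler samples and then restarts the hardware with cpc_set_restart().
 * Here cpc, set and buf are globals set up as in the earlier example:
 *
 *	void
 *	emt_handler(int sig, siginfo_t *sip, void *arg)
 *	{
 *		(void) cpc_set_sample(cpc, set, buf);
 *		(void) cpc_set_restart(cpc, set);
 *	}
 */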

/*ARGSUSED*/
int
cpc_unbind(cpc_t *cpc, cpc_set_t *set)
{
	int		ret = 0;
	int		error;

	if (cpc_set_valid(cpc, set) != 0) {
		errno = EINVAL;
		return (-1);
	}

	switch (set->cs_state) {
	case CS_UNBOUND:
		errno = EINVAL;
		return (-1);
	case CS_BOUND_CURLWP:
		ret = syscall(SYS_cpc, CPC_RELE, -1, 0, 0, 0);
		error = errno;
		break;
	case CS_BOUND_CPU:
		ret = ioctl(set->cs_fd, CPCIO_RELE, NULL);
		error = errno;
		(void) close(set->cs_fd);
		set->cs_fd = -1;
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		break;
	case CS_BOUND_PCTX:
		if (set->cs_pctx != NULL) {
			ret = __pctx_cpc(set->cs_pctx, cpc, CPC_RELE,
			    set->cs_id, 0, 0, 0, 0);
			error = errno;
		}
		break;
	}

	set->cs_thr = NULL;
	set->cs_id = -1;
	set->cs_state = CS_UNBOUND;
	if (ret != 0)
		errno = error;
	return (ret);
}

/*ARGSUSED*/
int
cpc_set_sample(cpc_t *cpc, cpc_set_t *set, cpc_buf_t *buf)
{
	__cpc_args_t args;

	/*
	 * The following check ensures that only the most recently bound set
	 * can be sampled, as binding a set invalidates all other sets in the
	 * cpc_t.
	 */
	if (set->cs_state == CS_UNBOUND ||
	    buf->cb_size != set->cs_nreqs * sizeof (uint64_t)) {
		errno = EINVAL;
		return (-1);
	}

	switch (set->cs_state) {
	case CS_BOUND_CURLWP:
		return (syscall(SYS_cpc, CPC_SAMPLE, -1, buf->cb_data,
		    &buf->cb_hrtime, &buf->cb_tick));
	case CS_BOUND_CPU:
		args.udata1 = buf->cb_data;
		args.udata2 = &buf->cb_hrtime;
		args.udata3 = &buf->cb_tick;
		return (ioctl(set->cs_fd, CPCIO_SAMPLE, &args));
	case CS_BOUND_PCTX:
		return (__pctx_cpc(set->cs_pctx, cpc, CPC_SAMPLE, set->cs_id,
		    buf->cb_data, &buf->cb_hrtime, &buf->cb_tick,
		    buf->cb_size));
	}

	errno = EINVAL;
	return (-1);
}

/*ARGSUSED*/
void
cpc_buf_sub(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
{
	int i;

	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
		return;

	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
	    a->cb_hrtime : b->cb_hrtime;
	ds->cb_tick = a->cb_tick - b->cb_tick;

	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
		ds->cb_data[i] = a->cb_data[i] - b->cb_data[i];
}

/*ARGSUSED*/
void
cpc_buf_add(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
{
	int i;

	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
		return;

	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
	    a->cb_hrtime : b->cb_hrtime;
	ds->cb_tick = a->cb_tick + b->cb_tick;

	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
		ds->cb_data[i] = a->cb_data[i] + b->cb_data[i];
}

/*ARGSUSED*/
void
cpc_buf_copy(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *src)
{
	if (ds->cb_size != src->cb_size)
		return;

	bcopy(src->cb_data, ds->cb_data, ds->cb_size);
	ds->cb_hrtime = src->cb_hrtime;
	ds->cb_tick = src->cb_tick;
}

/*ARGSUSED*/
void
cpc_buf_zero(cpc_t *cpc, cpc_buf_t *buf)
{
	bzero(buf->cb_data, buf->cb_size);
	buf->cb_hrtime = 0;
	buf->cb_tick = 0;
}

/*
 * Gets or sets the value of the request specified by index.
 */
/*ARGSUSED*/
int
cpc_buf_get(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t *val)
{
	*val = buf->cb_data[index];

	return (0);
}

/*ARGSUSED*/
int
cpc_buf_set(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t val)
{
	buf->cb_data[index] = val;

	return (0);
}

/*ARGSUSED*/
hrtime_t
cpc_buf_hrtime(cpc_t *cpc, cpc_buf_t *buf)
{
	return (buf->cb_hrtime);
}

/*ARGSUSED*/
uint64_t
cpc_buf_tick(cpc_t *cpc, cpc_buf_t *buf)
{
	return (buf->cb_tick);
}

static char *
cpc_get_list(int which, int arg)
{
	int	szcmd;
	int	size;
	char	*list;

	if (which == CPC_LIST_ATTRS)
		szcmd = CPC_ATTRLIST_SIZE;
	else
		szcmd = CPC_EVLIST_SIZE;

	if (syscall(SYS_cpc, szcmd, -1, &size, arg, 0) != 0)
		return (NULL);

	if ((list = malloc(size)) == NULL)
		return (NULL);

	if (syscall(SYS_cpc, which, -1, list, arg, 0) != 0) {
		free(list);
		return (NULL);
	}

	return (list);
}

/*ARGSUSED*/
void
cpc_walk_requests(cpc_t *cpc, cpc_set_t *set, void *arg,
    void (*action)(void *arg, int index, const char *event, uint64_t preset,
	uint_t flags, int nattrs, const cpc_attr_t *attrs))
{
	cpc_request_t	*rp;
	cpc_attr_t	*attrs = NULL;
	int		i;

	for (rp = set->cs_request; rp != NULL; rp = rp->cr_next) {
		/*
		 * Need to reconstruct a temporary cpc_attr_t array for req.
		 */
		if (rp->cr_nattrs != 0)
			if ((attrs = malloc(rp->cr_nattrs *
			    sizeof (cpc_attr_t))) == NULL)
				return;
		for (i = 0; i < rp->cr_nattrs; i++) {
			attrs[i].ca_name = rp->cr_attr[i].ka_name;
			attrs[i].ca_val = rp->cr_attr[i].ka_val;
		}

		action(arg, rp->cr_index, rp->cr_event, rp->cr_preset,
		    rp->cr_flags, rp->cr_nattrs, attrs);

		if (rp->cr_nattrs != 0)
			free(attrs);
	}
}

/*ARGSUSED*/
static void
cpc_walk_events_impl(cpc_t *cpc, void *arg,
    void (*action)(void *arg, const char *event), int is_generic)
{
	char		**list;
	char		*p, *e;
	int		i;
	int		is_papi;
	int		ncounters = cpc_npic(cpc);
	cpc_strhash_t	*hash;

	/*
	 * Use zeroed storage so that the cleanup at the err label below can
	 * safely free() slots that were never filled in.
	 */
	if ((list = calloc(ncounters, sizeof (char *))) == NULL)
		return;

	if ((hash = __cpc_strhash_alloc()) == NULL) {
		free(list);
		return;
	}

	for (i = 0; i < ncounters; i++) {
		if ((list[i] = strdup(cpc->cpc_evlist[i])) == NULL)
			goto err;
		p = list[i];
		while ((e = strchr(p, ',')) != NULL) {
			*e = '\0';

			/*
			 * Based on is_generic flag, skip appropriate
			 * event names.
			 */
			is_papi = (strncmp(p, "PAPI", 4) == 0);
			if (is_generic != is_papi) {
				p = e + 1;
				continue;
			}

			if (__cpc_strhash_add(hash, p) == -1)
				goto err;

			p = e + 1;
		}

		is_papi = (strncmp(p, "PAPI", 4) == 0);
		if (is_generic == is_papi) {
			if (__cpc_strhash_add(hash, p) == -1)
				goto err;
		}
	}

	while ((p = __cpc_strhash_next(hash)) != NULL)
		action(arg, p);

err:
	__cpc_strhash_free(hash);
	for (i = 0; i < ncounters; i++)
		free(list[i]);
	free(list);
}

/*ARGSUSED*/
void
cpc_walk_events_all(cpc_t *cpc, void *arg,
		    void (*action)(void *arg, const char *event))
{
	cpc_walk_events_impl(cpc, arg, action, 0);
}

/*ARGSUSED*/
void
cpc_walk_generic_events_all(cpc_t *cpc, void *arg,
			    void (*action)(void *arg, const char *event))
{
	cpc_walk_events_impl(cpc, arg, action, 1);
}

/*ARGSUSED*/
static void
cpc_walk_events_pic_impl(cpc_t *cpc, uint_t picno, void *arg,
    void (*action)(void *arg, uint_t picno, const char *event), int is_generic)
{
	char	*p;
	char	*e;
	char	*list;
	int	is_papi;

	if (picno >= cpc->cpc_npic) {
		errno = EINVAL;
		return;
	}

	if ((list = strdup(cpc->cpc_evlist[picno])) == NULL)
		return;

	/*
	 * List now points to a comma-separated list of events supported by
	 * the designated pic.
	 */
	p = list;
	while ((e = strchr(p, ',')) != NULL) {
		*e = '\0';

		/*
		 * Based on is_generic flag, skip appropriate
		 * event names.
		 */
		is_papi = (strncmp(p, "PAPI", 4) == 0);
		if (is_generic != is_papi) {
			p = e + 1;
			continue;
		}

		action(arg, picno, p);
		p = e + 1;
	}

	is_papi = (strncmp(p, "PAPI", 4) == 0);
	if (is_generic == is_papi)
		action(arg, picno, p);

	free(list);
}

/*ARGSUSED*/
void
cpc_walk_events_pic(cpc_t *cpc, uint_t picno, void *arg,
    void (*action)(void *arg, uint_t picno, const char *event))
{
	cpc_walk_events_pic_impl(cpc, picno, arg, action, 0);
}

/*ARGSUSED*/
void
cpc_walk_generic_events_pic(cpc_t *cpc, uint_t picno, void *arg,
    void (*action)(void *arg, uint_t picno, const char *event))
{
	cpc_walk_events_pic_impl(cpc, picno, arg, action, 1);
}

/*ARGSUSED*/
void
cpc_walk_attrs(cpc_t *cpc, void *arg,
    void (*action)(void *arg, const char *attr))
{
	char	*p;
	char	*e;
	char	*list;

	if ((list = strdup(cpc->cpc_attrlist)) == NULL)
		return;

	/*
	 * Platforms with no attributes will return an empty string.
	 */
	if (*list == '\0') {
		free(list);
		return;
	}

	/*
	 * List now points to a comma-separated list of attributes supported by
	 * the underlying platform.
	 */
	p = list;
	while ((e = strchr(p, ',')) != NULL) {
		*e = '\0';
		action(arg, p);
		p = e + 1;
	}
	action(arg, p);

	free(list);
}

/*ARGSUSED*/
int
cpc_enable(cpc_t *cpc)
{
	return (syscall(SYS_cpc, CPC_ENABLE, -1, 0, 0, 0));
}

/*ARGSUSED*/
int
cpc_disable(cpc_t *cpc)
{
	return (syscall(SYS_cpc, CPC_DISABLE, -1, 0, 0, 0));
}

/*ARGSUSED*/
uint_t
cpc_npic(cpc_t *cpc)
{
	return (cpc->cpc_npic);
}

/*ARGSUSED*/
uint_t
cpc_caps(cpc_t *cpc)
{
	return (cpc->cpc_caps);
}

const char *
cpc_cciname(cpc_t *cpc)
{
	return (cpc->cpc_cciname);
}

const char *
cpc_cpuref(cpc_t *cpc)
{
	return (cpc->cpc_cpuref);
}

int
cpc_seterrhndlr(cpc_t *cpc, cpc_errhndlr_t *fn)
{
	cpc->cpc_errfn = fn;
	return (0);
}

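/*
 * Example (illustrative sketch): an error handler as installed by
 * cpc_seterrhndlr() above. The arguments mirror the cpc_errfn() invocation in
 * cpc_err() below; since the strings in errstr[] may contain printf()
 * conversion specifiers, a handler should hand "fmt" and "ap" to vfprintf():
 *
 *	void
 *	myapp_errfn(const char *fn, int subcode, const char *fmt, va_list ap)
 *	{
 *		(void) fprintf(stderr, "myapp: cpc error in %s: ", fn);
 *		(void) vfprintf(stderr, fmt, ap);
 *	}
 */
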
/*
 * These strings may contain printf() conversion specifiers.
 */
static const char *errstr[] = {
"",						/* zero slot filler */
"Unknown event\n",				/* CPC_INVALID_EVENT */
"Invalid counter number\n",			/* CPC_INVALID_PICNUM */
"Unknown attribute\n",				/* CPC_INVALID_ATTRIBUTE */
"Attribute out of range\n",			/* CPC_ATTRIBUTE_OUT_OF_RANGE */
"Hardware resource unavailable\n",		/* CPC_RESOURCE_UNAVAIL */
"Counter cannot count requested event\n",	/* CPC_PIC_NOT_CAPABLE */
"Invalid flags in a request\n",			/* CPC_REQ_INVALID_FLAGS */
"Requests conflict with each other\n",		/* CPC_CONFLICTING_REQS */
"Attribute requires the cpc_cpu privilege\n",  /* CPC_ATTR_REQUIRES_PRIVILEGE */
"Couldn't bind LWP to requested processor\n",	/* CPC_PBIND_FAILED */
"Hypervisor event access denied\n"		/* CPC_HV_NO_ACCESS */
};

/*VARARGS3*/
static void
cpc_err(cpc_t *cpc, const char *fn, int subcode, ...)
{
	va_list		ap;
	const char	*str;
	int		error;

	/*
	 * If subcode is -1, there is no specific description for this error.
	 */
	if (subcode == -1)
		return;

	/*
	 * We need to preserve errno across calls to this function to prevent it
	 * from being clobbered while here, or in the user's error handler.
	 */
	error = errno;

	str = dgettext(TEXT_DOMAIN, errstr[subcode]);

	va_start(ap, subcode);
	if (cpc->cpc_errfn != NULL)
		cpc->cpc_errfn(fn, subcode, str, ap);
	else {
		/*
		 * If printf() conversion specifiers are added to the errstr[]
		 * table, this call needs to be changed to vfprintf().
		 */
		(void) fprintf(stderr, "libcpc: %s: %s", fn, str);
	}
	va_end(ap);

	errno = error;
}

/*
 * Hook used by libpctx to alert libcpc when a pctx handle is going away.
 * This is necessary to prevent libcpc from attempting a libpctx operation on a
 * stale and invalid pctx_t handle. Since pctx_t's are cached by libcpc, we need
 * to be notified when they go away.
 */
static void
cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx)
{
	cpc_set_t	*set;
	int		sigblocked;

	sigblocked = cpc_lock(cpc);
	for (set = cpc->cpc_sets; set != NULL; set = set->cs_next)
		if (set->cs_pctx == pctx)
			set->cs_pctx = NULL;
	cpc_unlock(cpc, sigblocked);
}

/*
 * Check that the set is valid; if so it will be in the cpc handle's
 * list of sets. The lock protects the list of sets, but not the set
 * itself.
 */
static int
cpc_set_valid(cpc_t *cpc, cpc_set_t *set)
{
	cpc_set_t	*csp;
	int		sigblocked;

	sigblocked = cpc_lock(cpc);
	for (csp = cpc->cpc_sets; csp != NULL; csp = csp->cs_next)
		if (csp == set)
			break;
	cpc_unlock(cpc, sigblocked);
	if (csp == NULL)
		return (-1);
	return (0);
}

static int
cpc_lock(cpc_t *cpc)
{
	int ret = (sigset(SIGEMT, SIG_HOLD) == SIG_HOLD);
	(void) mutex_lock(&cpc->cpc_lock);
	return (ret);
}

static void
cpc_unlock(cpc_t *cpc, int sigblocked)
{
	(void) mutex_unlock(&cpc->cpc_lock);
	if (sigblocked == 0)
		(void) sigrelse(SIGEMT);
}

struct priv {
	const char *name;
	int found;
};

/*ARGSUSED*/
static void
ev_walker(void *arg, uint_t picno, const char *ev)
{
	if (strcmp(((struct priv *)arg)->name, ev) == 0)
		((struct priv *)arg)->found = 1;
}

static void
at_walker(void *arg, const char *at)
{
	if (strcmp(((struct priv *)arg)->name, at) == 0)
		((struct priv *)arg)->found = 1;
}

static int
cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev)
{
	struct priv pr = { NULL, 0 };
	char *end_ev;
	int err;

	pr.name = ev;
	cpc_walk_events_pic(cpc, pic, &pr, ev_walker);
	if (pr.found)
		return (1);

	cpc_walk_generic_events_pic(cpc, pic, &pr, ev_walker);
	if (pr.found)
		return (1);

	/*
	 * Before assuming this is an invalid event, see if we have been given
	 * a raw event code. Check the end pointer returned via strtol()'s
	 * second argument so that invalid events that merely begin with a
	 * number are not accepted.
	 */
	err = errno;
	errno = 0;
	(void) strtol(ev, &end_ev, 0);
	if ((errno == 0) && (*end_ev == '\0')) {
		/*
		 * Success - this is a valid raw code in hex, decimal, or octal.
		 */
		errno = err;
		return (1);
	}

	errno = err;
	return (0);
}

static int
cpc_valid_attr(cpc_t *cpc, char *attr)
{
	struct priv pr = { NULL, 0 };

	pr.name = attr;
	cpc_walk_attrs(cpc, &pr, at_walker);
	return (pr.found);
}
1207