xref: /illumos-gate/usr/src/lib/libcpc/common/libcpc.c (revision 0245b61fd282e95735b173b8d95be0d6688163b4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  * Copyright (c) 2016 by Delphix. All rights reserved.
25  */
26 
27 #include <libcpc.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <errno.h>
31 #include <strings.h>
32 #include <unistd.h>
33 #include <stropts.h>
34 #include <libintl.h>
35 #include <signal.h>
36 #include <sys/syscall.h>
37 #include <sys/types.h>
38 #include <sys/processor.h>
39 #include <sys/procset.h>
40 
41 #include "libcpc_impl.h"
42 
43 #define	MASK32 0xFFFFFFFF
44 
45 /*
46  * The library uses the cpc_lock field of the cpc_t struct to protect access to
47  * the linked lists inside the cpc_t, and only the linked lists. It is NOT used
48  * to protect against users shooting themselves in the foot (for instance,
49  * destroying the same set at the same time from different threads).
50  *
51  * SIGEMT needs to be blocked while holding the lock, to prevent deadlock among
52  * an app holding the lock and a signal handler attempting to sample or bind.
53  */
54 
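
/*
 * The canonical locking pattern used throughout this file is:
 *
 *	sigblocked = cpc_lock(cpc);
 *	... walk or modify cpc->cpc_sets or cpc->cpc_bufs ...
 *	cpc_unlock(cpc, sigblocked);
 */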
55 static char *cpc_get_list(int which, int arg);
56 static void cpc_err(cpc_t *cpc, const char *fn, int subcode, ...);
57 static int cpc_set_valid(cpc_t *cpc, cpc_set_t *set);
58 static int cpc_lock(cpc_t *cpc);
59 static void cpc_unlock(cpc_t *cpc, int blocked);
60 static int cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev);
61 static int cpc_valid_attr(cpc_t *cpc, char *attr);
62 static void cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx);
63 
64 cpc_t *
65 cpc_open(int ver)
66 {
67 	cpc_t	*cpc;
68 	void	(*sigsaved)();
69 	int	error = 0;
70 	int	i;
71 	int	j;
72 
73 	if (ver != CPC_VER_CURRENT) {
74 		/*
75 		 * v1 clients must stick to the v1 interface: cpc_version()
76 		 */
77 		errno = EINVAL;
78 		return (NULL);
79 	}
80 
81 	/*
82 	 * Call the syscall with invalid parameters.  If we get ENOSYS this CPU
83 	 * has no CPC support.  We need to ignore SIGSYS because the kernel will
84 	 * send the signal if the system call fails to load.
85 	 */
86 	sigsaved = signal(SIGSYS, SIG_IGN);
87 	if (syscall(SYS_cpc, -1, -1, -1, -1, -1) != -1) {
88 		(void) signal(SIGSYS, sigsaved);
89 		errno = EINVAL;
90 		return (NULL);
91 	}
92 	error = errno;
93 	(void) signal(SIGSYS, sigsaved);
94 
95 	if (error != EINVAL) {
96 		errno = error;
97 		return (NULL);
98 	}
99 
100 	if ((cpc = malloc(sizeof (cpc_t))) == NULL) {
101 		errno = ENOMEM;
102 		return (NULL);
103 	}
104 
105 	cpc->cpc_npic = syscall(SYS_cpc, CPC_NPIC, -1, 0, 0, 0);
106 	cpc->cpc_caps = syscall(SYS_cpc, CPC_CAPS, -1, 0, 0, 0);
107 
108 	if (syscall(SYS_cpc, CPC_IMPL_NAME, -1, &cpc->cpc_cciname, 0, 0) != 0 ||
109 	    syscall(SYS_cpc, CPC_CPUREF, -1, &cpc->cpc_cpuref, 0, 0) != 0) {
110 		free(cpc);
111 		return (NULL);
112 	}
113 
114 	if ((cpc->cpc_attrlist = cpc_get_list(CPC_LIST_ATTRS, 0)) == NULL) {
115 		free(cpc);
116 		return (NULL);
117 	}
118 
119 	if ((cpc->cpc_evlist = malloc(cpc->cpc_npic * sizeof (char *))) ==
120 	    NULL) {
121 		free(cpc->cpc_attrlist);
122 		free(cpc);
123 		return (NULL);
124 	}
125 
126 	for (i = 0; i < cpc->cpc_npic; i++) {
127 		if ((cpc->cpc_evlist[i] = cpc_get_list(CPC_LIST_EVENTS, i)) ==
128 		    NULL)
129 			break;
130 	}
131 	if (i != cpc->cpc_npic) {
132 		for (j = 0; j < i; j++)
133 			free(cpc->cpc_evlist[j]);
134 		free(cpc->cpc_evlist);
135 		free(cpc->cpc_attrlist);
136 		free(cpc);
137 		return (NULL);
138 	}
139 
140 	cpc->cpc_sets = NULL;
141 	cpc->cpc_bufs = NULL;
142 	cpc->cpc_errfn = NULL;
143 	(void) mutex_init(&cpc->cpc_lock, USYNC_THREAD, NULL);
144 	__pctx_cpc_register_callback(cpc_invalidate_pctx);
145 
146 	return (cpc);
147 }
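
/*
 * Typical use of this interface, sketched below.  The event name is
 * hypothetical and platform-specific (see cpc_walk_events_all()), and
 * error handling is omitted:
 *
 *	cpc_t *cpc = cpc_open(CPC_VER_CURRENT);
 *	cpc_set_t *set = cpc_set_create(cpc);
 *	int idx = cpc_set_add_request(cpc, set, "EVENT_NAME", 0, 0, 0, NULL);
 *	cpc_buf_t *buf = cpc_buf_create(cpc, set);
 *
 *	if (cpc_bind_curlwp(cpc, set, 0) == 0) {
 *		... run the code to be measured ...
 *		(void) cpc_set_sample(cpc, set, buf);
 *		(void) cpc_unbind(cpc, set);
 *	}
 *	(void) cpc_close(cpc);
 */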
148 
149 /*
150  * Ensure state is cleaned up:
151  *
152  * - Hardware is unbound
153  * - Sets are all destroyed
154  * - Bufs are all freed
155  */
156 int
157 cpc_close(cpc_t *cpc)
158 {
159 	while (cpc->cpc_sets != NULL) {
160 		if (cpc->cpc_sets->cs_state != CS_UNBOUND)
161 			(void) cpc_unbind(cpc, cpc->cpc_sets);
162 		(void) cpc_set_destroy(cpc, cpc->cpc_sets);
163 	}
164 
165 	while (cpc->cpc_bufs != NULL)
166 		(void) cpc_buf_destroy(cpc, cpc->cpc_bufs);
167 
168 	free(cpc);
169 	return (0);
170 }
171 
172 /*
173  * Terminate everything that runs in pctx_run
174  */
175 void
176 cpc_terminate(cpc_t *cpc)
177 {
178 	cpc_set_t	*csp;
179 	int		sigblocked;
180 
181 	sigblocked = cpc_lock(cpc);
182 	for (csp = cpc->cpc_sets; csp != NULL; csp = csp->cs_next) {
183 		if (csp->cs_pctx != NULL)
184 			pctx_terminate(csp->cs_pctx);
185 	}
186 	cpc_unlock(cpc, sigblocked);
187 }
188 
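/*
 * Allocate a new, empty set in the CS_UNBOUND state and link it onto the
 * cpc handle's list of sets.
 */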
189 cpc_set_t *
190 cpc_set_create(cpc_t *cpc)
191 {
192 	cpc_set_t	*set;
193 	int		sigblocked;
194 
195 	if ((set = malloc(sizeof (*set))) == NULL) {
196 		errno = ENOMEM;
197 		return (NULL);
198 	}
199 
200 	set->cs_request = NULL;
201 	set->cs_nreqs	= 0;
202 	set->cs_state	= CS_UNBOUND;
203 	set->cs_fd	= -1;
204 	set->cs_pctx	= NULL;
205 	set->cs_id	= -1;
206 	set->cs_thr	= 0;
207 
208 	sigblocked = cpc_lock(cpc);
209 	set->cs_next = cpc->cpc_sets;
210 	cpc->cpc_sets = set;
211 	cpc_unlock(cpc, sigblocked);
212 
213 	return (set);
214 }
215 
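/*
 * Unlink the set from the cpc handle, unbind it if it is still bound,
 * release any associated process context, and free the set along with its
 * requests.
 */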
216 int
217 cpc_set_destroy(cpc_t *cpc, cpc_set_t *set)
218 {
219 	cpc_set_t	*csp, *prev;
220 	cpc_request_t	*req, *next;
221 	int		sigblocked;
222 
223 	/*
224 	 * Remove this set from the cpc handle's list of sets.
225 	 */
226 	sigblocked = cpc_lock(cpc);
227 	for (csp = prev = cpc->cpc_sets; csp != NULL; csp = csp->cs_next) {
228 		if (csp == set)
229 			break;
230 		prev = csp;
231 	}
232 	if (csp == NULL) {
233 		cpc_unlock(cpc, sigblocked);
234 		errno = EINVAL;
235 		return (-1);
236 	}
237 	if (csp == cpc->cpc_sets)
238 		cpc->cpc_sets = csp->cs_next;
239 	prev->cs_next = csp->cs_next;
240 	cpc_unlock(cpc, sigblocked);
241 
242 	if (csp->cs_state != CS_UNBOUND)
243 		(void) cpc_unbind(cpc, csp);
244 
245 	/*
246 	 * Detach from the process
247 	 */
248 	if (csp->cs_pctx != NULL) {
249 		pctx_release(csp->cs_pctx);
250 		csp->cs_pctx = NULL;
251 	}
252 
253 	for (req = csp->cs_request; req != NULL; req = next) {
254 		next = req->cr_next;
255 
256 		if (req->cr_nattrs != 0)
257 			free(req->cr_attr);
258 
259 		free(req);
260 	}
261 
263 	free(set);
264 
265 	return (0);
266 }
267 
268 /*ARGSUSED*/
269 int
270 cpc_set_add_request(cpc_t *cpc, cpc_set_t *set, const char *event,
271     uint64_t preset, uint_t flags, uint_t nattrs, const cpc_attr_t *attrs)
272 {
273 	cpc_request_t	*req;
274 	const char	*fn = "cpc_set_add_request";
275 	int		i;
276 	int		npics = cpc_npic(cpc);
277 
278 	if (cpc_set_valid(cpc, set) != 0 || set->cs_state != CS_UNBOUND) {
279 		errno = EINVAL;
280 		return (-1);
281 	}
282 
283 	for (i = 0; i < npics; i++)
284 		if (cpc_valid_event(cpc, i, event))
285 			break;
286 	if (i == npics) {
287 		cpc_err(cpc, fn, CPC_INVALID_EVENT);
288 		errno = EINVAL;
289 		return (-1);
290 	}
291 
292 	if ((req = malloc(sizeof (*req))) == NULL) {
293 		errno = ENOMEM;
294 		return (-1);
295 	}
296 
297 	(void) strncpy(req->cr_event, event, CPC_MAX_EVENT_LEN);
298 	req->cr_preset = preset;
299 	req->cr_flags = flags;
300 	req->cr_nattrs = nattrs;
301 	req->cr_index = set->cs_nreqs;
302 	req->cr_attr = NULL;
303 
304 	if (nattrs != 0) {
305 		for (i = 0; i < nattrs; i++) {
306 			/*
307 			 * Verify that each attribute name is non-empty and valid.
308 			 */
309 			if (attrs[i].ca_name[0] == '\0' ||
310 			    cpc_valid_attr(cpc, attrs[i].ca_name) == 0) {
311 				cpc_err(cpc, fn, CPC_INVALID_ATTRIBUTE);
312 				goto inval;
313 			}
314 
315 			/*
316 			 * If the user requested a specific picnum, ensure that
317 			 * the pic can count the requested event.
318 			 */
319 			if (strncmp("picnum", attrs[i].ca_name, 8) == 0) {
320 				if (attrs[i].ca_val >= npics) {
321 					cpc_err(cpc, fn, CPC_INVALID_PICNUM);
322 					goto inval;
323 				}
324 
325 				if (cpc_valid_event(cpc, attrs[i].ca_val,
326 				    req->cr_event) == 0) {
327 					cpc_err(cpc, fn, CPC_PIC_NOT_CAPABLE);
328 					goto inval;
329 				}
330 			}
331 		}
332 
333 		if ((req->cr_attr = malloc(nattrs * sizeof (kcpc_attr_t)))
334 		    == NULL) {
335 			free(req);
336 			return (-1);
337 		}
338 
339 		for (i = 0; i < nattrs; i++) {
340 			req->cr_attr[i].ka_val = attrs[i].ca_val;
341 			(void) strncpy(req->cr_attr[i].ka_name,
342 			    attrs[i].ca_name, CPC_MAX_ATTR_LEN);
343 		}
344 	} else
345 		req->cr_attr = NULL;
346 
347 	req->cr_next = set->cs_request;
348 	set->cs_request = req;
349 	set->cs_nreqs++;
350 
351 	return (req->cr_index);
352 
353 inval:
354 	free(req);
355 	errno = EINVAL;
356 	return (-1);
357 }
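
/*
 * Sketch of a request constrained to a particular counter with the "picnum"
 * attribute.  The event name is hypothetical and attribute support is
 * platform-specific (see cpc_walk_attrs()):
 *
 *	cpc_attr_t attr;
 *
 *	attr.ca_name = "picnum";
 *	attr.ca_val = 1;
 *	if (cpc_set_add_request(cpc, set, "EVENT_NAME", 0, 0, 1, &attr) == -1)
 *		... the event, attribute, or pic number was rejected ...
 */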
358 
359 cpc_buf_t *
360 cpc_buf_create(cpc_t *cpc, cpc_set_t *set)
361 {
362 	cpc_buf_t	*buf;
363 	int		sigblocked;
364 
365 	if (cpc_set_valid(cpc, set) != 0) {
366 		errno = EINVAL;
367 		return (NULL);
368 	}
369 
370 	if ((buf = malloc(sizeof (*buf))) == NULL)
371 		return (NULL);
372 
373 	buf->cb_size = set->cs_nreqs * sizeof (uint64_t);
374 	if ((buf->cb_data = malloc(buf->cb_size)) == NULL) {
375 		free(buf);
376 		return (NULL);
377 	}
378 
379 	bzero(buf->cb_data, buf->cb_size);
380 
381 	buf->cb_hrtime = 0;
382 	buf->cb_tick = 0;
383 
384 	sigblocked = cpc_lock(cpc);
385 	buf->cb_next = cpc->cpc_bufs;
386 	cpc->cpc_bufs = buf;
387 	cpc_unlock(cpc, sigblocked);
388 
389 	return (buf);
390 }
391 
392 int
393 cpc_buf_destroy(cpc_t *cpc, cpc_buf_t *buf)
394 {
395 	cpc_buf_t	*cbp, *prev;
396 	int		sigblocked;
397 
398 	/*
399 	 * Remove this buf from the cpc handle's list of bufs.
400 	 */
401 	sigblocked = cpc_lock(cpc);
402 	for (cbp = prev = cpc->cpc_bufs; cbp != NULL; cbp = cbp->cb_next) {
403 		if (cbp == buf)
404 			break;
405 		prev = cbp;
406 	}
407 	if (cbp == NULL) {
408 		cpc_unlock(cpc, sigblocked);
409 		errno = EINVAL;
410 		return (-1);
411 	}
412 	if (cbp == cpc->cpc_bufs)
413 		cpc->cpc_bufs = cbp->cb_next;
414 	prev->cb_next = cbp->cb_next;
415 
416 	cpc_unlock(cpc, sigblocked);
417 	free(cbp->cb_data);
418 	free(cbp);
419 
420 	return (0);
421 }
422 
423 /*ARGSUSED*/
424 int
425 cpc_bind_curlwp(cpc_t *cpc, cpc_set_t *set, uint_t flags)
426 {
427 	char		*packed_set;
428 	size_t		packsize;
429 	int		ret;
430 	int		subcode = -1;
431 
432 	/*
433 	 * We don't bother checking cpc_set_valid() here, because this is in the
434 	 * fast path of an app doing SIGEMT-based profiling as it restarts the
435 	 * counters from its signal handler (see the sketch below).
436 	 */
437 	if (CPC_SET_VALID_FLAGS(flags) == 0 || set->cs_nreqs <= 0) {
438 		errno = EINVAL;
439 		return (-1);
440 	}
441 
442 	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
443 		errno = ENOMEM;
444 		return (-1);
445 	}
446 
447 	ret = syscall(SYS_cpc, CPC_BIND, -1, packed_set, packsize, &subcode);
448 	free(packed_set);
449 
450 	if (ret != 0) {
451 		if (subcode != -1)
452 			cpc_err(cpc, "cpc_bind_curlwp", subcode);
453 		return (-1);
454 	}
455 
456 	set->cs_thr = thr_self();
457 	set->cs_state = CS_BOUND_CURLWP;
458 	return (ret);
459 }
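
/*
 * Sketch of the SIGEMT-based profiling mentioned above, assuming the
 * CPC_BIND_EMT_OVF flag from <libcpc.h> and a preset chosen so that the
 * counter overflows after the desired number of events (the handler and
 * the variables it uses are illustrative):
 *
 *	(void) cpc_bind_curlwp(cpc, set, CPC_BIND_EMT_OVF);
 *
 *	void
 *	emt_handler(int sig, siginfo_t *sip, void *ctx)
 *	{
 *		(void) cpc_set_sample(cpc, set, buf);
 *		... record the sample ...
 *		(void) cpc_set_restart(cpc, set);
 *	}
 */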
460 
461 /*ARGSUSED*/
462 int
463 cpc_bind_pctx(cpc_t *cpc, pctx_t *pctx, id_t id, cpc_set_t *set, uint_t flags)
464 {
465 	char		*packed_set;
466 	size_t		packsize;
467 	int		ret;
468 	int		subcode = -1;
469 
470 	/*
471 	 * cpc_bind_pctx() currently has no valid flags.
472 	 */
473 	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
474 		errno = EINVAL;
475 		return (-1);
476 	}
477 
478 	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
479 		errno = ENOMEM;
480 		return (-1);
481 	}
482 
483 	ret = __pctx_cpc(pctx, cpc, CPC_BIND, id, packed_set, (void *)packsize,
484 	    (void *)&subcode, -1);
485 
486 	free(packed_set);
487 
488 	if (ret == 0) {
489 		set->cs_pctx = pctx;
490 		set->cs_id = id;
491 		set->cs_state = CS_BOUND_PCTX;
492 	} else if (subcode != -1)
493 		cpc_err(cpc, "cpc_bind_pctx", subcode);
494 
495 	return (ret);
496 }
497 
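/*
 * Bind the set to the counters of a specific CPU.  The calling LWP is bound
 * to that CPU with processor_bind() and stays bound there until cpc_unbind()
 * restores the previous binding saved in cs_obind.
 */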
498 /*ARGSUSED*/
499 int
500 cpc_bind_cpu(cpc_t *cpc, processorid_t id, cpc_set_t *set, uint_t flags)
501 {
502 	int		fd;
503 	char		*packed_set;
504 	size_t		packsize;
505 	__cpc_args_t	cpc_args;
506 	int		error;
507 	const char	*fn = "cpc_bind_cpu";
508 	int		subcode = -1;
509 
510 	/*
511 	 * cpc_bind_cpu() currently has no valid flags.
512 	 */
513 	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
514 		errno = EINVAL;
515 		return (-1);
516 	}
517 
518 	if (processor_bind(P_LWPID, P_MYID, id, &set->cs_obind) == -1) {
519 		cpc_err(cpc, fn, CPC_PBIND_FAILED);
520 		return (-1);
521 	}
522 
523 	if ((fd = open(CPUDRV_SHARED, O_RDWR)) < 0) {
524 		error = errno;
525 		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
526 		errno = error;
527 		return (-1);
528 	}
529 
530 	/*
531 	 * To avoid leaking file descriptors, if we find an existing fd here we
532 	 * just close it. This is only a problem if a user attempts to bind the
533 	 * same set to different CPUs without first unbinding it.
534 	 */
535 	if (set->cs_fd != -1)
536 		(void) close(set->cs_fd);
537 	set->cs_fd = fd;
538 
539 	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
540 		(void) close(fd);
541 		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
542 		errno = ENOMEM;
543 		return (-1);
544 	}
545 
546 	cpc_args.udata1 = packed_set;
547 	cpc_args.udata2 = (void *)packsize;
548 	cpc_args.udata3 = (void *)&subcode;
549 
550 	if (ioctl(fd, CPCIO_BIND, &cpc_args) != 0) {
551 		error = errno;
552 		free(packed_set);
553 		(void) close(fd);
554 		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
555 		if (subcode != -1)
556 			cpc_err(cpc, fn, subcode);
557 		errno = error;
558 		return (-1);
559 	}
560 
561 	free(packed_set);
562 
563 	set->cs_thr = thr_self();
564 	set->cs_state = CS_BOUND_CPU;
565 
566 	return (0);
567 }
568 
569 /*ARGSUSED*/
570 int
571 cpc_request_preset(cpc_t *cpc, int index, uint64_t preset)
572 {
573 	return (syscall(SYS_cpc, CPC_PRESET, -1, index,
574 	    (uint32_t)(preset >> 32), (uint32_t)(preset & MASK32)));
575 }
576 
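/*
 * Restart the counters of the set bound to the calling LWP, typically from
 * a SIGEMT handler once the overflowed set has been sampled.
 */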
577 /*ARGSUSED*/
578 int
579 cpc_set_restart(cpc_t *cpc, cpc_set_t *set)
580 {
581 	return (syscall(SYS_cpc, CPC_RESTART, -1, 0, 0, 0));
582 }
583 
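/*
 * Stop the counters and release whatever the set is bound to: the calling
 * LWP, a CPU (closing the cpc device and undoing the processor binding), or
 * another process controlled through libpctx.
 */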
584 /*ARGSUSED*/
585 int
586 cpc_unbind(cpc_t *cpc, cpc_set_t *set)
587 {
588 	int		ret = 0;
589 	int		error;
590 
591 	if (cpc_set_valid(cpc, set) != 0) {
592 		errno = EINVAL;
593 		return (-1);
594 	}
595 
596 	switch (set->cs_state) {
597 	case CS_UNBOUND:
598 		errno = EINVAL;
599 		return (-1);
600 	case CS_BOUND_CURLWP:
601 		ret = syscall(SYS_cpc, CPC_RELE, -1, 0, 0, 0);
602 		error = errno;
603 		break;
604 	case CS_BOUND_CPU:
605 		ret = ioctl(set->cs_fd, CPCIO_RELE, NULL);
606 		error = errno;
607 		(void) close(set->cs_fd);
608 		set->cs_fd = -1;
609 		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
610 		break;
611 	case CS_BOUND_PCTX:
612 		if (set->cs_pctx != NULL) {
613 			ret = __pctx_cpc(set->cs_pctx, cpc, CPC_RELE,
614 			    set->cs_id, 0, 0, 0, 0);
615 			error = errno;
616 		}
617 		break;
618 	}
619 
620 	set->cs_thr = 0;
621 	set->cs_id = -1;
622 	set->cs_state = CS_UNBOUND;
623 	if (ret != 0)
624 		errno = error;
625 	return (ret);
626 }
627 
628 /*ARGSUSED*/
629 int
630 cpc_set_sample(cpc_t *cpc, cpc_set_t *set, cpc_buf_t *buf)
631 {
632 	__cpc_args_t args;
633 
634 	/*
635 	 * The following check ensures that only the most recently bound set
636 	 * can be sampled, as binding a set invalidates all other sets in the
637 	 * cpc_t.
638 	 */
639 	if (set->cs_state == CS_UNBOUND ||
640 	    buf->cb_size != set->cs_nreqs * sizeof (uint64_t)) {
641 		errno = EINVAL;
642 		return (-1);
643 	}
644 
645 	switch (set->cs_state) {
646 	case CS_BOUND_CURLWP:
647 		return (syscall(SYS_cpc, CPC_SAMPLE, -1, buf->cb_data,
648 		    &buf->cb_hrtime, &buf->cb_tick));
649 	case CS_BOUND_CPU:
650 		args.udata1 = buf->cb_data;
651 		args.udata2 = &buf->cb_hrtime;
652 		args.udata3 = &buf->cb_tick;
653 		return (ioctl(set->cs_fd, CPCIO_SAMPLE, &args));
654 	case CS_BOUND_PCTX:
655 		return (__pctx_cpc(set->cs_pctx, cpc, CPC_SAMPLE, set->cs_id,
656 		    buf->cb_data, &buf->cb_hrtime, &buf->cb_tick,
657 		    buf->cb_size));
658 	}
659 
660 	errno = EINVAL;
661 	return (-1);
662 }
663 
664 /*ARGSUSED*/
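/*
 * Buffer arithmetic.  The destination buffer must be the same size as both
 * operands; its timestamp becomes the later of the two, while the tick count
 * and the per-request counts are combined element-wise.
 */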
665 void
666 cpc_buf_sub(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
667 {
668 	int i;
669 
670 	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
671 		return;
672 
673 	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
674 	    a->cb_hrtime : b->cb_hrtime;
675 	ds->cb_tick = a->cb_tick - b->cb_tick;
676 
677 	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
678 		ds->cb_data[i] = a->cb_data[i] - b->cb_data[i];
679 }
680 
681 /*ARGSUSED*/
682 void
683 cpc_buf_add(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
684 {
685 	int i;
686 
687 	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
688 		return;
689 
690 	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
691 	    a->cb_hrtime : b->cb_hrtime;
692 	ds->cb_tick = a->cb_tick + b->cb_tick;
693 
694 	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
695 		ds->cb_data[i] = a->cb_data[i] + b->cb_data[i];
696 }
697 
698 /*ARGSUSED*/
699 void
700 cpc_buf_copy(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *src)
701 {
702 	if (ds->cb_size != src->cb_size)
703 		return;
704 
705 	bcopy(src->cb_data, ds->cb_data, ds->cb_size);
706 	ds->cb_hrtime = src->cb_hrtime;
707 	ds->cb_tick = src->cb_tick;
708 }
709 
710 /*ARGSUSED*/
711 void
712 cpc_buf_zero(cpc_t *cpc, cpc_buf_t *buf)
713 {
714 	bzero(buf->cb_data, buf->cb_size);
715 	buf->cb_hrtime = 0;
716 	buf->cb_tick = 0;
717 }
718 
719 /*
720  * Gets or sets the value of the request specified by index.
721  */
722 /*ARGSUSED*/
723 int
724 cpc_buf_get(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t *val)
725 {
726 	*val = buf->cb_data[index];
727 
728 	return (0);
729 }
730 
731 /*ARGSUSED*/
732 int
733 cpc_buf_set(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t val)
734 {
735 	buf->cb_data[index] = val;
736 
737 	return (0);
738 }
739 
740 /*ARGSUSED*/
741 hrtime_t
742 cpc_buf_hrtime(cpc_t *cpc, cpc_buf_t *buf)
743 {
744 	return (buf->cb_hrtime);
745 }
746 
747 /*ARGSUSED*/
748 uint64_t
749 cpc_buf_tick(cpc_t *cpc, cpc_buf_t *buf)
750 {
751 	return (buf->cb_tick);
752 }
753 
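/*
 * Fetch the attribute list, or the event list of a single pic, from the
 * kernel: ask for the required size first, then read the comma-separated
 * list into freshly allocated memory.  The caller frees the result.
 */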
754 static char *
755 cpc_get_list(int which, int arg)
756 {
757 	int	szcmd;
758 	int	size;
759 	char	*list;
760 
761 	if (which == CPC_LIST_ATTRS)
762 		szcmd = CPC_ATTRLIST_SIZE;
763 	else
764 		szcmd = CPC_EVLIST_SIZE;
765 
766 	if (syscall(SYS_cpc, szcmd, -1, &size, arg, 0) != 0)
767 		return (NULL);
768 
769 	if ((list = malloc(size)) == NULL)
770 		return (NULL);
771 
772 	if (syscall(SYS_cpc, which, -1, list, arg, 0) != 0) {
773 		free(list);
774 		return (NULL);
775 	}
776 
777 	return (list);
778 }
779 
780 /*ARGSUSED*/
781 void
782 cpc_walk_requests(cpc_t *cpc, cpc_set_t *set, void *arg,
783     void (*action)(void *arg, int index, const char *event, uint64_t preset,
784     uint_t flags, int nattrs, const cpc_attr_t *attrs))
785 {
786 	cpc_request_t	*rp;
787 	cpc_attr_t	*attrs = NULL;
788 	int		i;
789 
790 	for (rp = set->cs_request; rp != NULL; rp = rp->cr_next) {
791 		/*
792 		 * Need to reconstruct a temporary cpc_attr_t array for req.
793 		 */
794 		if (rp->cr_nattrs != 0)
795 			if ((attrs = malloc(rp->cr_nattrs *
796 			    sizeof (cpc_attr_t))) == NULL)
797 				return;
798 		for (i = 0; i < rp->cr_nattrs; i++) {
799 			attrs[i].ca_name = rp->cr_attr[i].ka_name;
800 			attrs[i].ca_val = rp->cr_attr[i].ka_val;
801 		}
802 
803 		action(arg, rp->cr_index, rp->cr_event, rp->cr_preset,
804 		    rp->cr_flags, rp->cr_nattrs, attrs);
805 
806 		free(attrs);
807 		attrs = NULL;
808 	}
809 }
810 
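/*
 * Walk the union of the event names of all counters, reporting each name
 * exactly once by collecting them in a string hash.  The is_generic flag
 * selects between platform events and generic ("PAPI") events.
 */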
811 /*ARGSUSED*/
812 static void
813 cpc_walk_events_impl(cpc_t *cpc, void *arg,
814     void (*action)(void *arg, const char *event), int is_generic)
815 {
816 	char		**list;
817 	char		*p, *e;
818 	int		i;
819 	int		is_papi;
820 	int		ncounters = cpc_npic(cpc);
821 	cpc_strhash_t	*hash;
822 
	/* zeroed so the error path below never frees uninitialized pointers */
823 	if ((list = calloc(ncounters, sizeof (char *))) == NULL)
824 		return;
825 
826 	if ((hash = __cpc_strhash_alloc()) == NULL) {
827 		free(list);
828 		return;
829 	}
830 
831 	for (i = 0; i < ncounters; i++) {
832 		if ((list[i] = strdup(cpc->cpc_evlist[i])) == NULL)
833 			goto err;
834 		p = list[i];
835 		while ((e = strchr(p, ',')) != NULL) {
836 			*e = '\0';
837 
838 			/*
839 			 * Skip names of the kind not being walked: platform
840 			 * events if is_generic is set, PAPI events otherwise.
841 			 */
842 			is_papi = (strncmp(p, "PAPI", 4) == 0);
843 			if (is_generic != is_papi) {
844 				p = e + 1;
845 				continue;
846 			}
847 
848 			if (__cpc_strhash_add(hash, p) == -1)
849 				goto err;
850 
851 			p = e + 1;
852 		}
853 
854 		is_papi = (strncmp(p, "PAPI", 4) == 0);
855 		if (is_generic == is_papi) {
856 			if (__cpc_strhash_add(hash, p) == -1)
857 				goto err;
858 		}
859 	}
860 
861 	while ((p = __cpc_strhash_next(hash)) != NULL)
862 		action(arg, p);
863 
864 err:
865 	__cpc_strhash_free(hash);
866 	for (i = 0; i < ncounters; i++)
867 		free(list[i]);
868 	free(list);
869 }
870 
871 /*ARGSUSED*/
872 void
873 cpc_walk_events_all(cpc_t *cpc, void *arg,
874     void (*action)(void *arg, const char *event))
875 {
876 	cpc_walk_events_impl(cpc, arg, action, 0);
877 }
878 
880 /*ARGSUSED*/
881 void
882 cpc_walk_generic_events_all(cpc_t *cpc, void *arg,
883     void (*action)(void *arg, const char *event))
884 {
885 	cpc_walk_events_impl(cpc, arg, action, 1);
886 }
887 
888 /*ARGSUSED*/
889 static void
890 cpc_walk_events_pic_impl(cpc_t *cpc, uint_t picno, void *arg,
891     void (*action)(void *arg, uint_t picno, const char *event), int is_generic)
892 {
893 	char	*p;
894 	char	*e;
895 	char	*list;
896 	int	is_papi;
897 
898 	if (picno >= cpc->cpc_npic) {
899 		errno = EINVAL;
900 		return;
901 	}
902 
903 	if ((list = strdup(cpc->cpc_evlist[picno])) == NULL)
904 		return;
905 
906 	/*
907 	 * List now points to a comma-separated list of events supported by
908 	 * the designated pic.
909 	 */
910 	p = list;
911 	while ((e = strchr(p, ',')) != NULL) {
912 		*e = '\0';
913 
914 		/*
915 		 * Skip names of the kind not being walked: platform events
916 		 * if is_generic is set, PAPI events otherwise.
917 		 */
918 		is_papi = (strncmp(p, "PAPI", 4) == 0);
919 		if (is_generic != is_papi) {
920 			p = e + 1;
921 			continue;
922 		}
923 
924 		action(arg, picno, p);
925 		p = e + 1;
926 	}
927 
928 	is_papi = (strncmp(p, "PAPI", 4) == 0);
929 	if (is_generic == is_papi)
930 		action(arg, picno, p);
931 
932 	free(list);
933 }
934 
935 /*ARGSUSED*/
936 void
937 cpc_walk_events_pic(cpc_t *cpc, uint_t picno, void *arg,
938     void (*action)(void *arg, uint_t picno, const char *event))
939 {
940 	cpc_walk_events_pic_impl(cpc, picno, arg, action, 0);
941 }
942 
943 /*ARGSUSED*/
944 void
945 cpc_walk_generic_events_pic(cpc_t *cpc, uint_t picno, void *arg,
946     void (*action)(void *arg, uint_t picno, const char *event))
947 {
948 	cpc_walk_events_pic_impl(cpc, picno, arg, action, 1);
949 }
950 
951 /*ARGSUSED*/
952 void
953 cpc_walk_attrs(cpc_t *cpc, void *arg,
954     void (*action)(void *arg, const char *attr))
955 {
956 	char	*p;
957 	char	*e;
958 	char	*list;
959 
960 	if ((list = strdup(cpc->cpc_attrlist)) == NULL)
961 		return;
962 
963 	/*
964 	 * Platforms with no attributes will return an empty string.
965 	 */
966 	if (*list == '\0') {
		free(list);
967 		return;
	}
968 
969 	/*
970 	 * List now points to a comma-separated list of attributes supported by
971 	 * the underlying platform.
972 	 */
973 	p = list;
974 	while ((e = strchr(p, ',')) != NULL) {
975 		*e = '\0';
976 		action(arg, p);
977 		p = e + 1;
978 	}
979 	action(arg, p);
980 
981 	free(list);
982 }
983 
984 /*ARGSUSED*/
985 int
986 cpc_enable(cpc_t *cpc)
987 {
988 	return (syscall(SYS_cpc, CPC_ENABLE, -1, 0, 0, 0));
989 }
990 
991 /*ARGSUSED*/
992 int
993 cpc_disable(cpc_t *cpc)
994 {
995 	return (syscall(SYS_cpc, CPC_DISABLE, -1, 0, 0, 0));
996 }
997 
998 /*ARGSUSED*/
999 uint_t
1000 cpc_npic(cpc_t *cpc)
1001 {
1002 	return (cpc->cpc_npic);
1003 }
1004 
1005 /*ARGSUSED*/
1006 uint_t
1007 cpc_caps(cpc_t *cpc)
1008 {
1009 	return (cpc->cpc_caps);
1010 }
1011 
1012 const char *
1013 cpc_cciname(cpc_t *cpc)
1014 {
1015 	return (cpc->cpc_cciname);
1016 }
1017 
1018 const char *
1019 cpc_cpuref(cpc_t *cpc)
1020 {
1021 	return (cpc->cpc_cpuref);
1022 }
1023 
1024 int
1025 cpc_seterrhndlr(cpc_t *cpc, cpc_errhndlr_t *fn)
1026 {
1027 	cpc->cpc_errfn = fn;
1028 	return (0);
1029 }
1030 
1031 /*
1032  * These strings may contain printf() conversion specifiers.
1033  */
1034 static const char *errstr[] = {
1035 "",						/* zero slot filler */
1036 "Unknown event\n",				/* CPC_INVALID_EVENT */
1037 "Invalid counter number\n",			/* CPC_INVALID_PICNUM */
1038 "Unknown attribute\n",				/* CPC_INVALID_ATTRIBUTE */
1039 "Attribute out of range\n",			/* CPC_ATTRIBUTE_OUT_OF_RANGE */
1040 "Hardware resource unavailable\n",		/* CPC_RESOURCE_UNAVAIL */
1041 "Counter cannot count requested event\n",	/* CPC_PIC_NOT_CAPABLE */
1042 "Invalid flags in a request\n",			/* CPC_REQ_INVALID_FLAGS */
1043 "Requests conflict with each other\n",		/* CPC_CONFLICTING_REQS */
1044 "Attribute requires the cpc_cpu privilege\n",  /* CPC_ATTR_REQUIRES_PRIVILEGE */
1045 "Couldn't bind LWP to requested processor\n",	/* CPC_PBIND_FAILED */
1046 "Hypervisor event access denied\n"		/* CPC_HV_NO_ACCESS */
1047 };
1048 
1049 /*VARARGS3*/
1050 static void
1051 cpc_err(cpc_t *cpc, const char *fn, int subcode, ...)
1052 {
1053 	va_list		ap;
1054 	const char	*str;
1055 	int		error;
1056 
1057 	/*
1058 	 * If subcode is -1, there is no specific description for this error.
1059 	 */
1060 	if (subcode == -1)
1061 		return;
1062 
1063 	/*
1064 	 * Preserve errno so that it is not clobbered here or in the user's
1065 	 * error handler.
1066 	 */
1067 	error = errno;
1068 
1069 	str = dgettext(TEXT_DOMAIN, errstr[subcode]);
1070 
1071 	va_start(ap, subcode);
1072 	if (cpc->cpc_errfn != NULL)
1073 		cpc->cpc_errfn(fn, subcode, str, ap);
1074 	else {
1075 		/*
1076 		 * If printf() conversion specifiers are added to the errstr[]
1077 		 * table, this call needs to be changed to vfprintf().
1078 		 */
1079 		(void) fprintf(stderr, "libcpc: %s: %s", fn, str);
1080 	}
1081 	va_end(ap);
1082 
1083 	errno = error;
1084 }
1085 
1086 /*
1087  * Hook used by libpctx to alert libcpc when a pctx handle is going away.
1088  * Since libcpc caches pctx_t pointers in its sets, it must be notified
1089  * when one goes away so that it never attempts a libpctx operation on a
1090  * stale, invalid handle.
1091  */
1092 static void
1093 cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx)
1094 {
1095 	cpc_set_t	*set;
1096 	int		sigblocked;
1097 
1098 	sigblocked = cpc_lock(cpc);
1099 	for (set = cpc->cpc_sets; set != NULL; set = set->cs_next)
1100 		if (set->cs_pctx == pctx)
1101 			set->cs_pctx = NULL;
1102 	cpc_unlock(cpc, sigblocked);
1103 }
1104 
1105 /*
1106  * Check that the set is valid; if so it will be in the cpc handle's
1107  * list of sets. The lock protects the list of sets, but not the set
1108  * itself.
1109  */
1110 static int
1111 cpc_set_valid(cpc_t *cpc, cpc_set_t *set)
1112 {
1113 	cpc_set_t	*csp;
1114 	int		sigblocked;
1115 
1116 	sigblocked = cpc_lock(cpc);
1117 	for (csp = cpc->cpc_sets; csp != NULL; csp = csp->cs_next)
1118 		if (csp == set)
1119 			break;
1120 	cpc_unlock(cpc, sigblocked);
1121 	if (csp == NULL)
1122 		return (-1);
1123 	return (0);
1124 }
1125 
1126 static int
1127 cpc_lock(cpc_t *cpc)
1128 {
1129 	int ret = (sigset(SIGEMT, SIG_HOLD) == SIG_HOLD);
1130 	(void) mutex_lock(&cpc->cpc_lock);
1131 	return (ret);
1132 }
1133 
1134 static void
1135 cpc_unlock(cpc_t *cpc, int sigblocked)
1136 {
1137 	(void) mutex_unlock(&cpc->cpc_lock);
1138 	if (sigblocked == 0)
1139 		(void) sigrelse(SIGEMT);
1140 }
1141 
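/*
 * Helpers for cpc_valid_event() and cpc_valid_attr(): the walker callbacks
 * below set pr.found when the name being searched for is encountered.
 */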
1142 struct priv {
1143 	const char *name;
1144 	int found;
1145 };
1146 
1147 /*ARGSUSED*/
1148 static void
1149 ev_walker(void *arg, uint_t picno, const char *ev)
1150 {
1151 	if (strcmp(((struct priv *)arg)->name, ev) == 0)
1152 		((struct priv *)arg)->found = 1;
1153 }
1154 
1155 static void
1156 at_walker(void *arg, const char *at)
1157 {
1158 	if (strcmp(((struct priv *)arg)->name, at) == 0)
1159 		((struct priv *)arg)->found = 1;
1160 }
1161 
1162 static int
1163 cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev)
1164 {
1165 	struct priv pr = { NULL, 0 };
1166 	char *end_ev;
1167 	int err;
1168 
1169 	pr.name = ev;
1170 	cpc_walk_events_pic(cpc, pic, &pr, ev_walker);
1171 	if (pr.found)
1172 		return (1);
1173 
1174 	cpc_walk_generic_events_pic(cpc, pic, &pr, ev_walker);
1175 	if (pr.found)
1176 		return (1);
1177 
1178 	/*
1179 	 * Before assuming this is an invalid event, see if we have been given
1180 	 * a raw event code.
1181 	 * Check where strtol() stopped parsing (its second argument) so that
1182 	 * an invalid event name that merely begins with a number is rejected.
1183 	 */
1184 	err = errno;
1185 	errno = 0;
1186 	(void) strtol(ev, &end_ev, 0);
1187 	if ((errno == 0) && (*end_ev == '\0')) {
1188 		/*
1189 		 * Success - this is a valid raw code in hex, decimal, or octal.
1190 		 */
1191 		errno = err;
1192 		return (1);
1193 	}
1194 
1195 	errno = err;
1196 	return (0);
1197 }
1198 
1199 static int
1200 cpc_valid_attr(cpc_t *cpc, char *attr)
1201 {
1202 	struct priv pr = { NULL, 0 };
1203 
1204 	pr.name = attr;
1205 	cpc_walk_attrs(cpc, &pr, at_walker);
1206 	return (pr.found);
1207 }
1208