1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <libcpc.h>
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <strings.h>
33 #include <unistd.h>
34 #include <stropts.h>
35 #include <libintl.h>
36 #include <signal.h>
37 #include <sys/syscall.h>
38 #include <sys/types.h>
39 #include <sys/processor.h>
40 #include <sys/procset.h>
41 
42 #include "libcpc_impl.h"
43 
44 #define	MASK32 0xFFFFFFFF
45 
/*
 * The library uses the cpc_lock field of the cpc_t struct to protect access to
 * the linked lists inside the cpc_t, and only the linked lists. It is NOT used
 * to protect users from shooting themselves in the foot (for instance, by
 * destroying the same set at the same time from different threads).
 *
 * SIGEMT needs to be blocked while holding the lock, to prevent deadlock
 * between an app holding the lock and a signal handler attempting to sample
 * or bind.
 */
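
/*
 * The locking discipline described above is implemented by cpc_lock() and
 * cpc_unlock() near the end of this file. A minimal sketch of the pattern
 * used throughout the library when touching the linked lists:
 *
 *	sigblocked = cpc_lock(cpc);	(hold SIGEMT, then take cpc_lock)
 *	... walk or modify cpc->cpc_sets or cpc->cpc_bufs ...
 *	cpc_unlock(cpc, sigblocked);	(drop cpc_lock, then release SIGEMT)
 */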
55 
56 static char *cpc_get_list(int which, int arg);
57 static void cpc_err(cpc_t *cpc, const char *fn, int subcode, ...);
58 static int cpc_set_valid(cpc_t *cpc, cpc_set_t *set);
59 static int cpc_lock(cpc_t *cpc);
60 static void cpc_unlock(cpc_t *cpc, int blocked);
61 static int cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev);
62 static int cpc_valid_attr(cpc_t *cpc, char *attr);
63 static void cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx);
64 
65 cpc_t *
66 cpc_open(int ver)
67 {
68 	cpc_t	*cpc;
69 	void	(*sigsaved)();
70 	int	error = 0;
71 	int	i;
72 	int	j;
73 
74 	if (ver != CPC_VER_CURRENT) {
75 		/*
76 		 * v1 clients must stick to the v1 interface: cpc_version()
77 		 */
78 		errno = EINVAL;
79 		return (NULL);
80 	}
81 
	/*
	 * Call the syscall with invalid parameters.  If we get ENOSYS, this
	 * CPU has no CPC support.  SIGSYS is ignored for the duration of the
	 * probe, because the kernel sends it if the loadable system call
	 * fails to load.
	 */
87 	sigsaved = signal(SIGSYS, SIG_IGN);
88 	if (syscall(SYS_cpc, -1, -1, -1, -1, -1) != -1) {
89 		(void) signal(SIGSYS, sigsaved);
90 		errno = EINVAL;
91 		return (NULL);
92 	}
93 	error = errno;
94 	(void) signal(SIGSYS, sigsaved);
95 
96 	if (error != EINVAL) {
97 		errno = error;
98 		return (NULL);
99 	}
100 
101 	if ((cpc = malloc(sizeof (cpc_t))) == NULL) {
102 		errno = ENOMEM;
103 		return (NULL);
104 	}
105 
106 	cpc->cpc_npic = syscall(SYS_cpc, CPC_NPIC, -1, 0, 0, 0);
107 	cpc->cpc_caps = syscall(SYS_cpc, CPC_CAPS, -1, 0, 0, 0);
108 
	if (syscall(SYS_cpc, CPC_IMPL_NAME, -1, &cpc->cpc_cciname, 0, 0) != 0) {
		free(cpc);
		return (NULL);
	}
	if (syscall(SYS_cpc, CPC_CPUREF, -1, &cpc->cpc_cpuref, 0, 0) != 0) {
		free(cpc);
		return (NULL);
	}
113 
114 
115 	if ((cpc->cpc_attrlist = cpc_get_list(CPC_LIST_ATTRS, 0)) == NULL) {
116 		free(cpc);
117 		return (NULL);
118 	}
119 
120 	if ((cpc->cpc_evlist = malloc(cpc->cpc_npic * sizeof (char *))) ==
121 	    NULL) {
122 		free(cpc->cpc_attrlist);
123 		free(cpc);
124 		return (NULL);
125 	}
126 
127 	for (i = 0; i < cpc->cpc_npic; i++) {
128 		if ((cpc->cpc_evlist[i] = cpc_get_list(CPC_LIST_EVENTS, i)) ==
129 		    NULL)
130 			break;
131 	}
132 	if (i != cpc->cpc_npic) {
133 		for (j = 0; j < i; j++)
134 			free(cpc->cpc_evlist[j]);
135 		free(cpc->cpc_evlist);
136 		free(cpc->cpc_attrlist);
137 		free(cpc);
138 		return (NULL);
139 	}
140 
141 	cpc->cpc_sets = NULL;
142 	cpc->cpc_bufs = NULL;
143 	cpc->cpc_errfn = NULL;
144 	(void) mutex_init(&cpc->cpc_lock, USYNC_THREAD, NULL);
145 	__pctx_cpc_register_callback(cpc_invalidate_pctx);
146 
147 	return (cpc);
148 }
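
/*
 * Illustrative sketch (not part of the library): the typical lifecycle of a
 * libcpc client using the interfaces in this file. The event name below is
 * hypothetical; real event names are platform-specific and can be discovered
 * with cpc_walk_events_pic() or cpc_walk_events_all(). Error handling is
 * omitted, and idx is the request index later passed to cpc_buf_get().
 *
 *	cpc_t *cpc = cpc_open(CPC_VER_CURRENT);
 *	cpc_set_t *set = cpc_set_create(cpc);
 *	int idx = cpc_set_add_request(cpc, set, "Hypothetical_event",
 *	    0, 0, 0, NULL);
 *	cpc_buf_t *buf = cpc_buf_create(cpc, set);
 *
 *	(void) cpc_bind_curlwp(cpc, set, 0);
 *	... run the code to be measured ...
 *	(void) cpc_set_sample(cpc, set, buf);
 *	(void) cpc_unbind(cpc, set);
 *
 *	(void) cpc_buf_destroy(cpc, buf);
 *	(void) cpc_set_destroy(cpc, set);
 *	(void) cpc_close(cpc);
 */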
149 
150 /*
151  * Ensure state is cleaned up:
152  *
153  * - Hardware is unbound
154  * - Sets are all destroyed
155  * - Bufs are all freed
156  */
157 int
158 cpc_close(cpc_t *cpc)
159 {
160 	while (cpc->cpc_sets != NULL) {
161 		if (cpc->cpc_sets->cs_state != CS_UNBOUND)
162 			(void) cpc_unbind(cpc, cpc->cpc_sets);
163 		(void) cpc_set_destroy(cpc, cpc->cpc_sets);
164 	}
165 
166 	while (cpc->cpc_bufs != NULL)
167 		(void) cpc_buf_destroy(cpc, cpc->cpc_bufs);
168 
169 	free(cpc);
170 	return (0);
171 }
172 
173 cpc_set_t *
174 cpc_set_create(cpc_t *cpc)
175 {
176 	cpc_set_t	*set;
177 	int		sigblocked;
178 
179 	if ((set = malloc(sizeof (*set))) == NULL) {
180 		errno = ENOMEM;
181 		return (NULL);
182 	}
183 
184 	set->cs_request = NULL;
185 	set->cs_nreqs	= 0;
186 	set->cs_state	= CS_UNBOUND;
187 	set->cs_fd	= -1;
188 	set->cs_pctx	= NULL;
189 	set->cs_id	= -1;
190 	set->cs_thr	= NULL;
191 
192 	sigblocked = cpc_lock(cpc);
193 	set->cs_next = cpc->cpc_sets;
194 	cpc->cpc_sets = set;
195 	cpc_unlock(cpc, sigblocked);
196 
197 	return (set);
198 }
199 
200 int
201 cpc_set_destroy(cpc_t *cpc, cpc_set_t *set)
202 {
203 	cpc_set_t	*csp, *prev;
204 	cpc_request_t	*req, *next;
205 	int		sigblocked;
206 
207 	/*
208 	 * Remove this set from the cpc handle's list of sets.
209 	 */
210 	sigblocked = cpc_lock(cpc);
211 	for (csp = prev = cpc->cpc_sets; csp != NULL; csp = csp->cs_next) {
212 		if (csp == set)
213 			break;
214 		prev = csp;
215 	}
216 	if (csp == NULL) {
217 		cpc_unlock(cpc, sigblocked);
218 		errno = EINVAL;
219 		return (-1);
220 	}
221 	if (csp == cpc->cpc_sets)
222 		cpc->cpc_sets = csp->cs_next;
223 	prev->cs_next = csp->cs_next;
224 	cpc_unlock(cpc, sigblocked);
225 
226 	if (csp->cs_state != CS_UNBOUND)
227 		(void) cpc_unbind(cpc, csp);
228 
229 	for (req = csp->cs_request; req != NULL; req = next) {
230 		next = req->cr_next;
231 
232 		if (req->cr_nattrs != 0)
233 			free(req->cr_attr);
234 
235 		free(req);
236 	}
237 
238 
239 	free(set);
240 
241 	return (0);
242 }
243 
244 /*ARGSUSED*/
245 int
246 cpc_set_add_request(cpc_t *cpc, cpc_set_t *set, const char *event,
247     uint64_t preset, uint_t flags, uint_t nattrs, const cpc_attr_t *attrs)
248 {
249 	cpc_request_t	*req;
250 	const char	*fn = "cpc_set_add_request";
251 	int		i;
252 	int		npics = cpc_npic(cpc);
253 
254 	if (cpc_set_valid(cpc, set) != 0 || set->cs_state != CS_UNBOUND) {
255 		errno = EINVAL;
256 		return (-1);
257 	}
258 
259 	for (i = 0; i < npics; i++)
260 		if (cpc_valid_event(cpc, i, event))
261 			break;
262 	if (i == npics) {
263 		cpc_err(cpc, fn, CPC_INVALID_EVENT);
264 		errno = EINVAL;
265 		return (-1);
266 	}
267 
268 	if ((req = malloc(sizeof (*req))) == NULL) {
269 		errno = ENOMEM;
270 		return (-1);
271 	}
272 
273 	(void) strncpy(req->cr_event, event, CPC_MAX_EVENT_LEN);
274 	req->cr_preset = preset;
275 	req->cr_flags = flags;
276 	req->cr_nattrs = nattrs;
277 	req->cr_index = set->cs_nreqs;
278 	req->cr_attr = NULL;
279 
280 	if (nattrs != 0) {
281 		for (i = 0; i < nattrs; i++) {
282 			/*
283 			 * Verify that each attribute name is legal and valid.
284 			 */
285 			if (attrs[i].ca_name[0] == '\0' ||
286 			    cpc_valid_attr(cpc, attrs[i].ca_name) == 0) {
287 				cpc_err(cpc, fn, CPC_INVALID_ATTRIBUTE);
288 				goto inval;
289 			}
290 
291 			/*
292 			 * If the user requested a specific picnum, ensure that
293 			 * the pic can count the requested event.
294 			 */
295 			if (strncmp("picnum", attrs[i].ca_name, 8) == 0) {
296 				if (attrs[i].ca_val >= npics) {
297 					cpc_err(cpc, fn, CPC_INVALID_PICNUM);
298 					goto inval;
299 				}
300 
301 				if (cpc_valid_event(cpc, attrs[i].ca_val,
302 				    req->cr_event) == 0) {
303 					cpc_err(cpc, fn, CPC_PIC_NOT_CAPABLE);
304 					goto inval;
305 				}
306 			}
307 		}
308 
309 		if ((req->cr_attr = malloc(nattrs * sizeof (kcpc_attr_t)))
310 		    == NULL) {
311 			free(req);
312 			return (-1);
313 		}
314 
315 		for (i = 0; i < nattrs; i++) {
316 			req->cr_attr[i].ka_val = attrs[i].ca_val;
317 			(void) strncpy(req->cr_attr[i].ka_name,
318 			    attrs[i].ca_name, CPC_MAX_ATTR_LEN);
319 		}
320 	} else
321 		req->cr_attr = NULL;
322 
323 	req->cr_next = set->cs_request;
324 	set->cs_request = req;
325 	set->cs_nreqs++;
326 
327 	return (req->cr_index);
328 
329 inval:
330 	free(req);
331 	errno = EINVAL;
332 	return (-1);
333 }
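
/*
 * Illustrative sketch (not part of the library): adding a request that is
 * pinned to a specific counter via the "picnum" attribute validated above.
 * The event name is hypothetical and error handling is omitted.
 *
 *	cpc_attr_t attr;
 *
 *	attr.ca_name = "picnum";
 *	attr.ca_val = 1;
 *	(void) cpc_set_add_request(cpc, set, "Hypothetical_event", 0, 0,
 *	    1, &attr);
 */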
334 
335 cpc_buf_t *
336 cpc_buf_create(cpc_t *cpc, cpc_set_t *set)
337 {
338 	cpc_buf_t	*buf;
339 	int		sigblocked;
340 
341 	if (cpc_set_valid(cpc, set) != 0) {
342 		errno = EINVAL;
343 		return (NULL);
344 	}
345 
346 	if ((buf = malloc(sizeof (*buf))) == NULL)
347 		return (NULL);
348 
349 	buf->cb_size = set->cs_nreqs * sizeof (uint64_t);
350 	if ((buf->cb_data = malloc(buf->cb_size)) == NULL) {
351 		free(buf);
352 		return (NULL);
353 	}
354 
355 	bzero(buf->cb_data, buf->cb_size);
356 
357 	buf->cb_hrtime = 0;
358 	buf->cb_tick = 0;
359 
360 	sigblocked = cpc_lock(cpc);
361 	buf->cb_next = cpc->cpc_bufs;
362 	cpc->cpc_bufs = buf;
363 	cpc_unlock(cpc, sigblocked);
364 
365 	return (buf);
366 }
367 
368 int
369 cpc_buf_destroy(cpc_t *cpc, cpc_buf_t *buf)
370 {
371 	cpc_buf_t	*cbp, *prev;
372 	int		sigblocked;
373 
374 	/*
375 	 * Remove this buf from the cpc handle's list of bufs.
376 	 */
377 	sigblocked = cpc_lock(cpc);
378 	for (cbp = prev = cpc->cpc_bufs; cbp != NULL; cbp = cbp->cb_next) {
379 		if (cbp == buf)
380 			break;
381 		prev = cbp;
382 	}
383 	if (cbp == NULL) {
384 		cpc_unlock(cpc, sigblocked);
385 		errno = EINVAL;
386 		return (-1);
387 	}
388 	if (cbp == cpc->cpc_bufs)
389 		cpc->cpc_bufs = cbp->cb_next;
390 	prev->cb_next = cbp->cb_next;
391 
392 	cpc_unlock(cpc, sigblocked);
393 	free(cbp->cb_data);
394 	free(cbp);
395 
396 	return (0);
397 }
398 
399 /*ARGSUSED*/
400 int
401 cpc_bind_curlwp(cpc_t *cpc, cpc_set_t *set, uint_t flags)
402 {
403 	char		*packed_set;
404 	size_t		packsize;
405 	int		ret;
406 	int		subcode = -1;
407 
	/*
	 * We don't bother checking cpc_set_valid() here, because this is in
	 * the fast path of an app doing SIGEMT-based profiling as it restarts
	 * the counters from its signal handler.
	 */
413 	if (CPC_SET_VALID_FLAGS(flags) == 0 || set->cs_nreqs <= 0) {
414 		errno = EINVAL;
415 		return (-1);
416 	}
417 
418 	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
419 		errno = ENOMEM;
420 		return (-1);
421 	}
422 
423 	ret = syscall(SYS_cpc, CPC_BIND, -1, packed_set, packsize, &subcode);
424 	free(packed_set);
425 
426 	if (ret != 0) {
427 		if (subcode != -1)
428 			cpc_err(cpc, "cpc_bind_curlwp", subcode);
429 		return (-1);
430 	}
431 
432 	set->cs_thr = thr_self();
433 	set->cs_state = CS_BOUND_CURLWP;
434 	return (ret);
435 }
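
/*
 * Illustrative sketch (not part of the library): the SIGEMT fast path
 * mentioned above. An app that has arranged for counter-overflow
 * notification (via request flags not shown here) typically samples and
 * re-arms its set from the handler, roughly as follows; cpc, set, and buf
 * are assumed to be set up elsewhere and visible to the handler.
 *
 *	void
 *	emt_handler(int sig)
 *	{
 *		(void) cpc_set_sample(cpc, set, buf);
 *		... record buf ...
 *		(void) cpc_set_restart(cpc, set);
 *	}
 */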
436 
437 /*ARGSUSED*/
438 int
439 cpc_bind_pctx(cpc_t *cpc, pctx_t *pctx, id_t id, cpc_set_t *set, uint_t flags)
440 {
441 	char		*packed_set;
442 	size_t		packsize;
443 	int		ret;
444 	int		subcode = -1;
445 
446 	/*
447 	 * cpc_bind_pctx() currently has no valid flags.
448 	 */
449 	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
450 		errno = EINVAL;
451 		return (-1);
452 	}
453 
454 	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
455 		errno = ENOMEM;
456 		return (-1);
457 	}
458 
459 	ret = __pctx_cpc(pctx, cpc, CPC_BIND, id, packed_set, (void *)packsize,
460 	    (void *)&subcode, -1);
461 
462 	free(packed_set);
463 
464 	if (ret == 0) {
465 		set->cs_pctx = pctx;
466 		set->cs_id = id;
467 		set->cs_state = CS_BOUND_PCTX;
468 	} else if (subcode != -1)
469 		cpc_err(cpc, "cpc_bind_pctx", subcode);
470 
471 	return (ret);
472 }
473 
474 /*ARGSUSED*/
475 int
476 cpc_bind_cpu(cpc_t *cpc, processorid_t id, cpc_set_t *set, uint_t flags)
477 {
478 	int		fd;
479 	char		*packed_set;
480 	size_t		packsize;
481 	__cpc_args_t	cpc_args;
482 	int		error;
483 	const char	*fn = "cpc_bind_cpu";
484 	int		subcode = -1;
485 
486 	/*
487 	 * cpc_bind_cpu() currently has no valid flags.
488 	 */
489 	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
490 		errno = EINVAL;
491 		return (-1);
492 	}
493 
494 	if (processor_bind(P_LWPID, P_MYID, id, &set->cs_obind) == -1) {
495 		cpc_err(cpc, fn, CPC_PBIND_FAILED);
496 		return (-1);
497 	}
498 
499 	if ((fd = open(CPUDRV_SHARED, O_RDWR)) < 0) {
500 		error = errno;
501 		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
502 		errno = error;
503 		return (-1);
504 	}
505 
506 	/*
507 	 * To avoid leaking file descriptors, if we find an existing fd here we
508 	 * just close it. This is only a problem if a user attempts to bind the
509 	 * same set to different CPUs without first unbinding it.
510 	 */
511 	if (set->cs_fd != -1)
512 		(void) close(set->cs_fd);
513 	set->cs_fd = fd;
514 
515 	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
516 		(void) close(fd);
517 		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
518 		errno = ENOMEM;
519 		return (-1);
520 	}
521 
522 	cpc_args.udata1 = packed_set;
523 	cpc_args.udata2 = (void *)packsize;
524 	cpc_args.udata3 = (void *)&subcode;
525 
526 	if (ioctl(fd, CPCIO_BIND, &cpc_args) != 0) {
527 		error = errno;
528 		free(packed_set);
529 		(void) close(fd);
530 		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
531 		if (subcode != -1)
532 			cpc_err(cpc, fn, subcode);
533 		errno = error;
534 		return (-1);
535 	}
536 
537 	free(packed_set);
538 
539 	set->cs_thr = thr_self();
540 	set->cs_state = CS_BOUND_CPU;
541 
542 	return (0);
543 }
544 
545 /*ARGSUSED*/
546 int
547 cpc_request_preset(cpc_t *cpc, int index, uint64_t preset)
548 {
549 	return (syscall(SYS_cpc, CPC_PRESET, -1, index,
550 	    (uint32_t)(preset >> 32), (uint32_t)(preset & MASK32)));
551 }
552 
553 /*ARGSUSED*/
554 int
555 cpc_set_restart(cpc_t *cpc, cpc_set_t *set)
556 {
557 	return (syscall(SYS_cpc, CPC_RESTART, -1, 0, 0, 0));
558 }
559 
560 /*ARGSUSED*/
561 int
562 cpc_unbind(cpc_t *cpc, cpc_set_t *set)
563 {
564 	int		ret = 0;
565 	int		error;
566 
567 	if (cpc_set_valid(cpc, set) != 0) {
568 		errno = EINVAL;
569 		return (-1);
570 	}
571 
572 	switch (set->cs_state) {
573 	case CS_UNBOUND:
574 		errno = EINVAL;
575 		return (-1);
576 	case CS_BOUND_CURLWP:
577 		ret = syscall(SYS_cpc, CPC_RELE, -1, 0, 0, 0);
578 		error = errno;
579 		break;
580 	case CS_BOUND_CPU:
581 		ret = ioctl(set->cs_fd, CPCIO_RELE, NULL);
582 		error = errno;
583 		(void) close(set->cs_fd);
584 		set->cs_fd = -1;
585 		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
586 		break;
587 	case CS_BOUND_PCTX:
588 		if (set->cs_pctx != NULL) {
589 			ret = __pctx_cpc(set->cs_pctx, cpc, CPC_RELE,
590 			    set->cs_id, 0, 0, 0, 0);
591 			error = errno;
592 		}
593 		break;
594 	}
595 
596 	set->cs_thr = NULL;
597 	set->cs_id = -1;
598 	set->cs_state = CS_UNBOUND;
599 	if (ret != 0)
600 		errno = error;
601 	return (ret);
602 }
603 
604 /*ARGSUSED*/
605 int
606 cpc_set_sample(cpc_t *cpc, cpc_set_t *set, cpc_buf_t *buf)
607 {
608 	__cpc_args_t args;
609 
610 	/*
611 	 * The following check ensures that only the most recently bound set
612 	 * can be sampled, as binding a set invalidates all other sets in the
613 	 * cpc_t.
614 	 */
615 	if (set->cs_state == CS_UNBOUND ||
616 	    buf->cb_size != set->cs_nreqs * sizeof (uint64_t)) {
617 		errno = EINVAL;
618 		return (-1);
619 	}
620 
621 	switch (set->cs_state) {
622 	case CS_BOUND_CURLWP:
623 		return (syscall(SYS_cpc, CPC_SAMPLE, -1, buf->cb_data,
624 		    &buf->cb_hrtime, &buf->cb_tick));
625 	case CS_BOUND_CPU:
626 		args.udata1 = buf->cb_data;
627 		args.udata2 = &buf->cb_hrtime;
628 		args.udata3 = &buf->cb_tick;
629 		return (ioctl(set->cs_fd, CPCIO_SAMPLE, &args));
630 	case CS_BOUND_PCTX:
631 		return (__pctx_cpc(set->cs_pctx, cpc, CPC_SAMPLE, set->cs_id,
632 		    buf->cb_data, &buf->cb_hrtime, &buf->cb_tick,
633 		    buf->cb_size));
634 	}
635 
636 	errno = EINVAL;
637 	return (-1);
638 }
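
/*
 * Illustrative sketch (not part of the library): the common pattern of
 * sampling a bound set before and after a workload and computing the delta
 * with cpc_buf_sub(). before, after, and diff are cpc_buf_t pointers from
 * cpc_buf_create(); idx is the index returned by cpc_set_add_request().
 * Error handling is omitted.
 *
 *	uint64_t count;
 *
 *	(void) cpc_set_sample(cpc, set, before);
 *	... run the workload ...
 *	(void) cpc_set_sample(cpc, set, after);
 *	cpc_buf_sub(cpc, diff, after, before);
 *	(void) cpc_buf_get(cpc, diff, idx, &count);
 */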
639 
640 /*ARGSUSED*/
641 void
642 cpc_buf_sub(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
643 {
644 	int i;
645 
646 	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
647 		return;
648 
649 	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
650 	    a->cb_hrtime : b->cb_hrtime;
651 	ds->cb_tick = a->cb_tick - b->cb_tick;
652 
653 	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
654 		ds->cb_data[i] = a->cb_data[i] - b->cb_data[i];
655 }
656 
657 /*ARGSUSED*/
658 void
659 cpc_buf_add(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
660 {
661 	int i;
662 
663 	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
664 		return;
665 
666 	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
667 	    a->cb_hrtime : b->cb_hrtime;
668 	ds->cb_tick = a->cb_tick + b->cb_tick;
669 
670 	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
671 		ds->cb_data[i] = a->cb_data[i] + b->cb_data[i];
672 }
673 
674 /*ARGSUSED*/
675 void
676 cpc_buf_copy(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *src)
677 {
678 	if (ds->cb_size != src->cb_size)
679 		return;
680 
681 	bcopy(src->cb_data, ds->cb_data, ds->cb_size);
682 	ds->cb_hrtime = src->cb_hrtime;
683 	ds->cb_tick = src->cb_tick;
684 }
685 
686 /*ARGSUSED*/
687 void
688 cpc_buf_zero(cpc_t *cpc, cpc_buf_t *buf)
689 {
690 	bzero(buf->cb_data, buf->cb_size);
691 	buf->cb_hrtime = 0;
692 	buf->cb_tick = 0;
693 }
694 
695 /*
696  * Gets or sets the value of the request specified by index.
697  */
698 /*ARGSUSED*/
699 int
700 cpc_buf_get(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t *val)
701 {
702 	*val = buf->cb_data[index];
703 
704 	return (0);
705 }
706 
707 /*ARGSUSED*/
708 int
709 cpc_buf_set(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t val)
710 {
711 	buf->cb_data[index] = val;
712 
713 	return (0);
714 }
715 
716 /*ARGSUSED*/
717 hrtime_t
718 cpc_buf_hrtime(cpc_t *cpc, cpc_buf_t *buf)
719 {
720 	return (buf->cb_hrtime);
721 }
722 
723 /*ARGSUSED*/
724 uint64_t
725 cpc_buf_tick(cpc_t *cpc, cpc_buf_t *buf)
726 {
727 	return (buf->cb_tick);
728 }
729 
730 static char *
731 cpc_get_list(int which, int arg)
732 {
733 	int	szcmd;
734 	int	size;
735 	char	*list;
736 
737 	if (which == CPC_LIST_ATTRS)
738 		szcmd = CPC_ATTRLIST_SIZE;
739 	else
740 		szcmd = CPC_EVLIST_SIZE;
741 
742 	if (syscall(SYS_cpc, szcmd, -1, &size, arg, 0) != 0)
743 		return (NULL);
744 
745 	if ((list = malloc(size)) == NULL)
746 		return (NULL);
747 
748 	if (syscall(SYS_cpc, which, -1, list, arg, 0) != 0) {
749 		free(list);
750 		return (NULL);
751 	}
752 
753 	return (list);
754 }
755 
756 /*ARGSUSED*/
757 void
758 cpc_walk_requests(cpc_t *cpc, cpc_set_t *set, void *arg,
759     void (*action)(void *arg, int index, const char *event, uint64_t preset,
760 	uint_t flags, int nattrs, const cpc_attr_t *attrs))
761 {
762 	cpc_request_t	*rp;
763 	cpc_attr_t	*attrs = NULL;
764 	int		i;
765 
766 	for (rp = set->cs_request; rp != NULL; rp = rp->cr_next) {
767 		/*
768 		 * Need to reconstruct a temporary cpc_attr_t array for req.
769 		 */
770 		if (rp->cr_nattrs != 0)
771 			if ((attrs = malloc(rp->cr_nattrs *
772 			    sizeof (cpc_attr_t))) == NULL)
773 				return;
774 		for (i = 0; i < rp->cr_nattrs; i++) {
775 			attrs[i].ca_name = rp->cr_attr[i].ka_name;
776 			attrs[i].ca_val = rp->cr_attr[i].ka_val;
777 		}
778 
779 		action(arg, rp->cr_index, rp->cr_event, rp->cr_preset,
780 		    rp->cr_flags, rp->cr_nattrs, attrs);
781 
782 		if (rp->cr_nattrs != 0)
783 			free(attrs);
784 	}
785 }
786 
787 /*ARGSUSED*/
788 void
789 cpc_walk_events_all(cpc_t *cpc, void *arg,
790     void (*action)(void *arg, const char *event))
791 {
792 	char		**list;
793 	char		*p, *e;
794 	int		i;
795 	int		ncounters = cpc_npic(cpc);
796 	cpc_strhash_t	*hash;
797 
	/*
	 * Use calloc() so that the cleanup path below can safely free()
	 * entries that were never filled in.
	 */
	if ((list = calloc(ncounters, sizeof (char *))) == NULL)
799 		return;
800 
801 	if ((hash = __cpc_strhash_alloc()) == NULL) {
802 		free(list);
803 		return;
804 	}
805 
806 	for (i = 0; i < ncounters; i++) {
807 		if ((list[i] = strdup(cpc->cpc_evlist[i])) == NULL)
808 			goto err;
809 		p = list[i];
810 		while ((e = strchr(p, ',')) != NULL) {
811 			*e = '\0';
812 			if (__cpc_strhash_add(hash, p) == -1)
813 				goto err;
814 			p = e + 1;
815 		}
816 		if (__cpc_strhash_add(hash, p) == -1)
817 			goto err;
818 	}
819 
820 	while ((p = __cpc_strhash_next(hash)) != NULL)
821 		action(arg, p);
822 
823 err:
824 	__cpc_strhash_free(hash);
825 	for (i = 0; i < ncounters; i++)
826 		free(list[i]);
827 	free(list);
828 }
829 
830 /*ARGSUSED*/
831 void
832 cpc_walk_events_pic(cpc_t *cpc, uint_t picno, void *arg,
833     void (*action)(void *arg, uint_t picno, const char *event))
834 {
835 	char	*p;
836 	char	*e;
837 	char	*list;
838 
839 	if (picno >= cpc->cpc_npic) {
840 		errno = EINVAL;
841 		return;
842 	}
843 
844 	if ((list = strdup(cpc->cpc_evlist[picno])) == NULL)
845 		return;
846 
847 	/*
848 	 * List now points to a comma-separated list of events supported by
849 	 * the designated pic.
850 	 */
851 	p = list;
852 	while ((e = strchr(p, ',')) != NULL) {
853 		*e = '\0';
854 		action(arg, picno, p);
855 		p = e + 1;
856 	}
857 	action(arg, picno, p);
858 
859 	free(list);
860 }
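
/*
 * Illustrative sketch (not part of the library): printing every event that
 * counter 0 can count, using the walker above.
 *
 *	static void
 *	print_event(void *arg, uint_t picno, const char *event)
 *	{
 *		(void) printf("pic%u: %s\n", picno, event);
 *	}
 *
 *	cpc_walk_events_pic(cpc, 0, NULL, print_event);
 */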
861 
862 /*ARGSUSED*/
863 void
864 cpc_walk_attrs(cpc_t *cpc, void *arg,
865     void (*action)(void *arg, const char *attr))
866 {
867 	char	*p;
868 	char	*e;
869 	char	*list;
870 
871 	if ((list = strdup(cpc->cpc_attrlist)) == NULL)
872 		return;
873 
	/*
	 * Platforms with no attributes will return an empty string.
	 */
	if (*list == '\0') {
		free(list);
		return;
	}
879 
880 	/*
881 	 * List now points to a comma-separated list of attributes supported by
882 	 * the underlying platform.
883 	 */
884 	p = list;
885 	while ((e = strchr(p, ',')) != NULL) {
886 		*e = '\0';
887 		action(arg, p);
888 		p = e + 1;
889 	}
890 	action(arg, p);
891 
892 	free(list);
893 }
894 
895 /*ARGSUSED*/
896 int
897 cpc_enable(cpc_t *cpc)
898 {
899 	return (syscall(SYS_cpc, CPC_ENABLE, -1, 0, 0, 0));
900 }
901 
902 /*ARGSUSED*/
903 int
904 cpc_disable(cpc_t *cpc)
905 {
906 	return (syscall(SYS_cpc, CPC_DISABLE, -1, 0, 0, 0));
907 }
908 
909 /*ARGSUSED*/
910 uint_t
911 cpc_npic(cpc_t *cpc)
912 {
913 	return (cpc->cpc_npic);
914 }
915 
916 /*ARGSUSED*/
917 uint_t
918 cpc_caps(cpc_t *cpc)
919 {
920 	return (cpc->cpc_caps);
921 }
922 
923 const char *
924 cpc_cciname(cpc_t *cpc)
925 {
926 	return (cpc->cpc_cciname);
927 }
928 
929 const char *
930 cpc_cpuref(cpc_t *cpc)
931 {
932 	return (cpc->cpc_cpuref);
933 }
934 
935 int
936 cpc_seterrhndlr(cpc_t *cpc, cpc_errhndlr_t *fn)
937 {
938 	cpc->cpc_errfn = fn;
939 	return (0);
940 }
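
/*
 * Illustrative sketch (not part of the library): installing a custom error
 * handler. The handler's signature mirrors the cpc_errfn invocation in
 * cpc_err() below.
 *
 *	static void
 *	my_errfn(const char *fn, int subcode, const char *fmt, va_list ap)
 *	{
 *		(void) fprintf(stderr, "myapp: %s: ", fn);
 *		(void) vfprintf(stderr, fmt, ap);
 *	}
 *
 *	(void) cpc_seterrhndlr(cpc, my_errfn);
 */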
941 
942 /*
943  * These strings may contain printf() conversion specifiers.
944  */
945 static const char *errstr[] = {
946 "",						/* zero slot filler */
947 "Unknown event\n",				/* CPC_INVALID_EVENT */
948 "Invalid counter number\n",			/* CPC_INVALID_PICNUM */
949 "Unknown attribute\n",				/* CPC_INVALID_ATTRIBUTE */
950 "Attribute out of range\n",			/* CPC_ATTRIBUTE_OUT_OF_RANGE */
951 "Hardware resource unavailable\n",		/* CPC_RESOURCE_UNAVAIL */
952 "Counter cannot count requested event\n",	/* CPC_PIC_NOT_CAPABLE */
953 "Invalid flags in a request\n",			/* CPC_REQ_INVALID_FLAGS */
954 "Requests conflict with each other\n",		/* CPC_CONFLICTING_REQS */
955 "Attribute requires the cpc_cpu privilege\n",  /* CPC_ATTR_REQUIRES_PRIVILEGE */
956 "Couldn't bind LWP to requested processor\n",	/* CPC_PBIND_FAILED */
957 "Hypervisor event access denied\n"		/* CPC_HV_NO_ACCESS */
958 };
959 
960 /*VARARGS3*/
961 static void
962 cpc_err(cpc_t *cpc, const char *fn, int subcode, ...)
963 {
964 	va_list		ap;
965 	const char	*str;
966 	int		error;
967 
968 	/*
969 	 * If subcode is -1, there is no specific description for this error.
970 	 */
971 	if (subcode == -1)
972 		return;
973 
	/*
	 * Save errno so that it is not clobbered here or in the user's error
	 * handler, and restore it before returning.
	 */
978 	error = errno;
979 
980 	str = dgettext(TEXT_DOMAIN, errstr[subcode]);
981 
982 	va_start(ap, subcode);
983 	if (cpc->cpc_errfn != NULL)
984 		cpc->cpc_errfn(fn, subcode, str, ap);
985 	else {
986 		/*
987 		 * If printf() conversion specifiers are added to the errstr[]
988 		 * table, this call needs to be changed to vfprintf().
989 		 */
990 		(void) fprintf(stderr, "libcpc: %s: %s", fn, str);
991 	}
992 	va_end(ap);
993 
994 	errno = error;
995 }
996 
997 /*
998  * Hook used by libpctx to alert libcpc when a pctx handle is going away.
999  * This is necessary to prevent libcpc from attempting a libpctx operation on a
1000  * stale and invalid pctx_t handle. Since pctx_t's are cached by libcpc, we need
1001  * to be notified when they go away.
1002  */
1003 static void
1004 cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx)
1005 {
1006 	cpc_set_t	*set;
1007 	int		sigblocked;
1008 
1009 	sigblocked = cpc_lock(cpc);
1010 	for (set = cpc->cpc_sets; set != NULL; set = set->cs_next)
1011 		if (set->cs_pctx == pctx)
1012 			set->cs_pctx = NULL;
1013 	cpc_unlock(cpc, sigblocked);
1014 }
1015 
1016 /*
1017  * Check that the set is valid; if so it will be in the cpc handle's
1018  * list of sets. The lock protects the list of sets, but not the set
1019  * itself.
1020  */
1021 static int
1022 cpc_set_valid(cpc_t *cpc, cpc_set_t *set)
1023 {
1024 	cpc_set_t	*csp;
1025 	int		sigblocked;
1026 
1027 	sigblocked = cpc_lock(cpc);
1028 	for (csp = cpc->cpc_sets; csp != NULL; csp = csp->cs_next)
1029 		if (csp == set)
1030 			break;
1031 	cpc_unlock(cpc, sigblocked);
1032 	if (csp == NULL)
1033 		return (-1);
1034 	return (0);
1035 }
1036 
1037 static int
1038 cpc_lock(cpc_t *cpc)
1039 {
1040 	int ret = (sigset(SIGEMT, SIG_HOLD) == SIG_HOLD);
1041 	(void) mutex_lock(&cpc->cpc_lock);
1042 	return (ret);
1043 }
1044 
1045 static void
1046 cpc_unlock(cpc_t *cpc, int sigblocked)
1047 {
1048 	(void) mutex_unlock(&cpc->cpc_lock);
1049 	if (sigblocked == 0)
1050 		(void) sigrelse(SIGEMT);
1051 }
1052 
1053 struct priv {
1054 	const char *name;
1055 	int found;
1056 };
1057 
1058 /*ARGSUSED*/
1059 static void
1060 ev_walker(void *arg, uint_t picno, const char *ev)
1061 {
1062 	if (strcmp(((struct priv *)arg)->name, ev) == 0)
1063 		((struct priv *)arg)->found = 1;
1064 }
1065 
1066 static void
1067 at_walker(void *arg, const char *at)
1068 {
1069 	if (strcmp(((struct priv *)arg)->name, at) == 0)
1070 		((struct priv *)arg)->found = 1;
1071 }
1072 
1073 static int
1074 cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev)
1075 {
1076 	struct priv pr = { NULL, 0 };
1077 	char *end_ev;
1078 
1079 	pr.name = ev;
1080 	cpc_walk_events_pic(cpc, pic, &pr, ev_walker);
1081 	if (pr.found)
1082 		return (1);
1083 
	/*
	 * Before assuming this is an invalid event, see if we have been given
	 * a raw event code. A code of '0' is not accepted: existing backends
	 * already provide an event name for 0, and a nonzero return value is
	 * the only reasonable way to tell that strtol() succeeded. The end
	 * pointer returned via strtol()'s second argument is also checked, so
	 * that invalid events that merely begin with a number (e.g. "0x3ax")
	 * are rejected while a pure numeric code (e.g. "0x3a") is accepted.
	 */
1092 	if ((strtol(ev, &end_ev, 0) != 0) && (*end_ev == '\0'))
1093 		/*
1094 		 * Success - this is a valid raw code in hex, decimal, or octal.
1095 		 */
1096 		return (1);
1097 
1098 	return (0);
1099 }
1100 
1101 static int
1102 cpc_valid_attr(cpc_t *cpc, char *attr)
1103 {
1104 	struct priv pr = { NULL, 0 };
1105 
1106 	pr.name = attr;
1107 	cpc_walk_attrs(cpc, &pr, at_walker);
1108 	return (pr.found);
1109 }
1110