xref: /titanic_41/usr/src/lib/libcpc/common/libcpc.c (revision 70025d765b044c6d8594bb965a2247a61e991a99)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <libcpc.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <errno.h>
33 #include <strings.h>
34 #include <unistd.h>
35 #include <stropts.h>
36 #include <libintl.h>
37 #include <signal.h>
38 #include <sys/syscall.h>
39 #include <sys/types.h>
40 #include <sys/processor.h>
41 #include <sys/procset.h>
42 
43 #include "libcpc_impl.h"
44 
45 #define	MASK32 0xFFFFFFFF
46 
47 /*
48  * The library uses the cpc_lock field of the cpc_t struct to protect access to
49  * the linked lists inside the cpc_t, and only the linked lists. It is NOT used
50  * to protect against a user shooting his/herself in the foot (such as, for
51  * instance, destroying the same set at the same time from different threads.).
52  *
53  * SIGEMT needs to be blocked while holding the lock, to prevent deadlock among
54  * an app holding the lock and a signal handler attempting to sample or bind.
55  */
56 
57 static char *cpc_get_list(int which, int arg);
58 static void cpc_err(cpc_t *cpc, const char *fn, int subcode, ...);
59 static int cpc_set_valid(cpc_t *cpc, cpc_set_t *set);
60 static int cpc_lock(cpc_t *cpc);
61 static void cpc_unlock(cpc_t *cpc, int blocked);
62 static int cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev);
63 static int cpc_valid_attr(cpc_t *cpc, char *attr);
64 static void cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx);
65 
66 cpc_t *
67 cpc_open(int ver)
68 {
69 	cpc_t	*cpc;
70 	void	(*sigsaved)();
71 	int	error = 0;
72 	int	i;
73 	int	j;
74 
75 	if (ver != CPC_VER_CURRENT) {
76 		/*
77 		 * v1 clients must stick to the v1 interface: cpc_version()
78 		 */
79 		errno = EINVAL;
80 		return (NULL);
81 	}
82 
83 	/*
84 	 * Call the syscall with invalid parameters.  If we get ENOSYS this CPU
85 	 * has no CPC support.  We need to block SIGSYS because the syscall code
86 	 * will send the signal if the system call fails to load.
87 	 */
88 	sigsaved = signal(SIGSYS, SIG_IGN);
89 	if (syscall(SYS_cpc, -1, -1, -1, -1, -1) != -1) {
90 		(void) signal(SIGSYS, sigsaved);
91 		errno = EINVAL;
92 		return (NULL);
93 	}
94 	error = errno;
95 	(void) signal(SIGSYS, sigsaved);
96 
97 	if (error != EINVAL) {
98 		errno = error;
99 		return (NULL);
100 	}
101 
102 	if ((cpc = malloc(sizeof (cpc_t))) == NULL) {
103 		errno = ENOMEM;
104 		return (NULL);
105 	}
106 
107 	cpc->cpc_npic = syscall(SYS_cpc, CPC_NPIC, -1, 0, 0, 0);
108 	cpc->cpc_caps = syscall(SYS_cpc, CPC_CAPS, -1, 0, 0, 0);
109 
110 	if (syscall(SYS_cpc, CPC_IMPL_NAME, -1, &cpc->cpc_cciname, 0, 0) != 0)
111 		return (NULL);
112 	if (syscall(SYS_cpc, CPC_CPUREF, -1, &cpc->cpc_cpuref, 0, 0) != 0)
113 		return (NULL);
114 
115 
116 	if ((cpc->cpc_attrlist = cpc_get_list(CPC_LIST_ATTRS, 0)) == NULL) {
117 		free(cpc);
118 		return (NULL);
119 	}
120 
121 	if ((cpc->cpc_evlist = malloc(cpc->cpc_npic * sizeof (char *))) ==
122 	    NULL) {
123 		free(cpc->cpc_attrlist);
124 		free(cpc);
125 		return (NULL);
126 	}
127 
128 	for (i = 0; i < cpc->cpc_npic; i++) {
129 		if ((cpc->cpc_evlist[i] = cpc_get_list(CPC_LIST_EVENTS, i)) ==
130 		    NULL)
131 			break;
132 	}
133 	if (i != cpc->cpc_npic) {
134 		for (j = 0; j < i; j++)
135 			free(cpc->cpc_evlist[j]);
136 		free(cpc->cpc_evlist);
137 		free(cpc->cpc_attrlist);
138 		free(cpc);
139 		return (NULL);
140 	}
141 
142 	cpc->cpc_sets = NULL;
143 	cpc->cpc_bufs = NULL;
144 	cpc->cpc_errfn = NULL;
145 	(void) mutex_init(&cpc->cpc_lock, USYNC_THREAD, NULL);
146 	__pctx_cpc_register_callback(cpc_invalidate_pctx);
147 
148 	return (cpc);
149 }
150 
151 /*
152  * Ensure state is cleaned up:
153  *
154  * - Hardware is unbound
155  * - Sets are all destroyed
156  * - Bufs are all freed
157  */
158 int
159 cpc_close(cpc_t *cpc)
160 {
161 	while (cpc->cpc_sets != NULL) {
162 		if (cpc->cpc_sets->cs_state != CS_UNBOUND)
163 			(void) cpc_unbind(cpc, cpc->cpc_sets);
164 		(void) cpc_set_destroy(cpc, cpc->cpc_sets);
165 	}
166 
167 	while (cpc->cpc_bufs != NULL)
168 		(void) cpc_buf_destroy(cpc, cpc->cpc_bufs);
169 
170 	free(cpc);
171 	return (0);
172 }
173 
174 cpc_set_t *
175 cpc_set_create(cpc_t *cpc)
176 {
177 	cpc_set_t	*set;
178 	int		sigblocked;
179 
180 	if ((set = malloc(sizeof (*set))) == NULL) {
181 		errno = ENOMEM;
182 		return (NULL);
183 	}
184 
185 	set->cs_request = NULL;
186 	set->cs_nreqs	= 0;
187 	set->cs_state	= CS_UNBOUND;
188 	set->cs_fd	= -1;
189 	set->cs_pctx	= NULL;
190 	set->cs_id	= -1;
191 	set->cs_thr	= NULL;
192 
193 	sigblocked = cpc_lock(cpc);
194 	set->cs_next = cpc->cpc_sets;
195 	cpc->cpc_sets = set;
196 	cpc_unlock(cpc, sigblocked);
197 
198 	return (set);
199 }
200 
/*
 * Unlink a set from the cpc handle and free it, along with all of its
 * requests and their attribute arrays.  A still-bound set is unbound
 * first.  Returns 0 on success, or -1 with errno set to EINVAL if the
 * set is not on this handle's list.
 */
int
cpc_set_destroy(cpc_t *cpc, cpc_set_t *set)
{
	cpc_set_t	*csp, *prev;
	cpc_request_t	*req, *next;
	int		sigblocked;

	/*
	 * Remove this set from the cpc handle's list of sets.
	 */
	sigblocked = cpc_lock(cpc);
	for (csp = prev = cpc->cpc_sets; csp != NULL; csp = csp->cs_next) {
		if (csp == set)
			break;
		prev = csp;
	}
	if (csp == NULL) {
		cpc_unlock(cpc, sigblocked);
		errno = EINVAL;
		return (-1);
	}
	/*
	 * When csp is the list head, prev == csp here, so the store through
	 * prev->cs_next below only scribbles on the node being freed.
	 */
	if (csp == cpc->cpc_sets)
		cpc->cpc_sets = csp->cs_next;
	prev->cs_next = csp->cs_next;
	cpc_unlock(cpc, sigblocked);

	/* Release any hardware still associated with the set. */
	if (csp->cs_state != CS_UNBOUND)
		(void) cpc_unbind(cpc, csp);

	/* Free each request and its kernel-format attribute array. */
	for (req = csp->cs_request; req != NULL; req = next) {
		next = req->cr_next;

		if (req->cr_nattrs != 0)
			free(req->cr_attr);

		free(req);
	}


	free(set);

	return (0);
}
244 
/*
 * Append a counter request for `event' to a (still unbound) set.  Any
 * attributes are validated and copied into kernel-format kcpc_attr_t
 * form.  Returns the request's index within the set on success, or -1
 * with errno set on failure.
 */
/*ARGSUSED*/
int
cpc_set_add_request(cpc_t *cpc, cpc_set_t *set, const char *event,
    uint64_t preset, uint_t flags, uint_t nattrs, const cpc_attr_t *attrs)
{
	cpc_request_t	*req;
	const char	*fn = "cpc_set_add_request";
	int		i;
	int		npics = cpc_npic(cpc);

	/* Requests may only be added to known, still-unbound sets. */
	if (cpc_set_valid(cpc, set) != 0 || set->cs_state != CS_UNBOUND) {
		errno = EINVAL;
		return (-1);
	}

	/* The event must be countable on at least one pic. */
	for (i = 0; i < npics; i++)
		if (cpc_valid_event(cpc, i, event))
			break;
	if (i == npics) {
		cpc_err(cpc, fn, CPC_INVALID_EVENT);
		errno = EINVAL;
		return (-1);
	}

	if ((req = malloc(sizeof (*req))) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	(void) strncpy(req->cr_event, event, CPC_MAX_EVENT_LEN);
	req->cr_preset = preset;
	req->cr_flags = flags;
	req->cr_nattrs = nattrs;
	req->cr_index = set->cs_nreqs;
	req->cr_attr = NULL;

	if (nattrs != 0) {
		for (i = 0; i < nattrs; i++) {
			/*
			 * Verify that each attribute name is legal and valid.
			 */
			if (attrs[i].ca_name[0] == '\0' ||
			    cpc_valid_attr(cpc, attrs[i].ca_name) == 0) {
				cpc_err(cpc, fn, CPC_INVALID_ATTRIBUTE);
				goto inval;
			}

			/*
			 * If the user requested a specific picnum, ensure that
			 * the pic can count the requested event.
			 */
			if (strncmp("picnum", attrs[i].ca_name, 8) == 0) {
				if (attrs[i].ca_val >= npics) {
					cpc_err(cpc, fn, CPC_INVALID_PICNUM);
					goto inval;
				}

				if (cpc_valid_event(cpc, attrs[i].ca_val,
				    req->cr_event) == 0) {
					cpc_err(cpc, fn, CPC_PIC_NOT_CAPABLE);
					goto inval;
				}
			}
		}

		/* Copy the attributes into the kernel's format. */
		if ((req->cr_attr = malloc(nattrs * sizeof (kcpc_attr_t)))
		    == NULL) {
			free(req);
			return (-1);
		}

		for (i = 0; i < nattrs; i++) {
			req->cr_attr[i].ka_val = attrs[i].ca_val;
			(void) strncpy(req->cr_attr[i].ka_name,
			    attrs[i].ca_name, CPC_MAX_ATTR_LEN);
		}
	} else
		req->cr_attr = NULL;

	/* Success: push the request onto the set's list. */
	req->cr_next = set->cs_request;
	set->cs_request = req;
	set->cs_nreqs++;

	return (req->cr_index);

inval:
	free(req);
	errno = EINVAL;
	return (-1);
}
335 
336 cpc_buf_t *
337 cpc_buf_create(cpc_t *cpc, cpc_set_t *set)
338 {
339 	cpc_buf_t	*buf;
340 	int		sigblocked;
341 
342 	if (cpc_set_valid(cpc, set) != 0) {
343 		errno = EINVAL;
344 		return (NULL);
345 	}
346 
347 	if ((buf = malloc(sizeof (*buf))) == NULL)
348 		return (NULL);
349 
350 	buf->cb_size = set->cs_nreqs * sizeof (uint64_t);
351 	if ((buf->cb_data = malloc(buf->cb_size)) == NULL) {
352 		free(buf);
353 		return (NULL);
354 	}
355 
356 	bzero(buf->cb_data, buf->cb_size);
357 
358 	buf->cb_hrtime = 0;
359 	buf->cb_tick = 0;
360 
361 	sigblocked = cpc_lock(cpc);
362 	buf->cb_next = cpc->cpc_bufs;
363 	cpc->cpc_bufs = buf;
364 	cpc_unlock(cpc, sigblocked);
365 
366 	return (buf);
367 }
368 
/*
 * Unlink a sample buffer from the cpc handle's buffer list and free it
 * (both the data array and the descriptor).  Returns 0 on success, or
 * -1 with errno set to EINVAL if the buffer is not on this handle's
 * list.
 */
int
cpc_buf_destroy(cpc_t *cpc, cpc_buf_t *buf)
{
	cpc_buf_t	*cbp, *prev;
	int		sigblocked;

	/*
	 * Remove this buf from the cpc handle's list of bufs.
	 */
	sigblocked = cpc_lock(cpc);
	for (cbp = prev = cpc->cpc_bufs; cbp != NULL; cbp = cbp->cb_next) {
		if (cbp == buf)
			break;
		prev = cbp;
	}
	if (cbp == NULL) {
		cpc_unlock(cpc, sigblocked);
		errno = EINVAL;
		return (-1);
	}
	/*
	 * When cbp is the list head, prev == cbp here, so the store through
	 * prev->cb_next below only scribbles on the node being freed.
	 */
	if (cbp == cpc->cpc_bufs)
		cpc->cpc_bufs = cbp->cb_next;
	prev->cb_next = cbp->cb_next;

	cpc_unlock(cpc, sigblocked);
	free(cbp->cb_data);
	free(cbp);

	return (0);
}
399 
/*
 * Bind the set to the calling LWP.  On success records the caller's
 * thread id and marks the set CS_BOUND_CURLWP.  Returns 0 on success,
 * or -1 with errno set (and a cpc_err() report when the kernel supplied
 * an error subcode).
 */
/*ARGSUSED*/
int
cpc_bind_curlwp(cpc_t *cpc, cpc_set_t *set, uint_t flags)
{
	char		*packed_set;
	size_t		packsize;
	int		ret;
	int		subcode = -1;

	/*
	 * We don't bother checking cpc_set_valid() here, because this is in the
	 * fast path of an app doing SIGEMT-based profiling as they restart the
	 * counters from their signal handler.
	 */
	if (CPC_SET_VALID_FLAGS(flags) == 0 || set->cs_nreqs <= 0) {
		errno = EINVAL;
		return (-1);
	}

	/* Flatten the set into the kernel's wire format. */
	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	/* The kernel reports a detailed error reason through subcode. */
	ret = syscall(SYS_cpc, CPC_BIND, -1, packed_set, packsize, &subcode);
	free(packed_set);

	if (ret != 0) {
		if (subcode != -1)
			cpc_err(cpc, "cpc_bind_curlwp", subcode);
		return (-1);
	}

	set->cs_thr = thr_self();
	set->cs_state = CS_BOUND_CURLWP;
	return (ret);
}
437 
/*
 * Bind the set to the LWP identified by `id' in the process controlled
 * by the libpctx handle `pctx'.  On success the pctx handle and lwp id
 * are cached in the set and it is marked CS_BOUND_PCTX.  Returns the
 * result of the bind operation (-1 with errno set on failure).
 */
/*ARGSUSED*/
int
cpc_bind_pctx(cpc_t *cpc, pctx_t *pctx, id_t id, cpc_set_t *set, uint_t flags)
{
	char		*packed_set;
	size_t		packsize;
	int		ret;
	int		subcode = -1;

	/*
	 * cpc_bind_pctx() currently has no valid flags.
	 */
	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
		errno = EINVAL;
		return (-1);
	}

	/* Flatten the set into the kernel's wire format. */
	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		errno = ENOMEM;
		return (-1);
	}

	ret = __pctx_cpc(pctx, cpc, CPC_BIND, id, packed_set, (void *)packsize,
	    (void *)&subcode, -1);

	free(packed_set);

	if (ret == 0) {
		set->cs_pctx = pctx;
		set->cs_id = id;
		set->cs_state = CS_BOUND_PCTX;
	} else if (subcode != -1)
		cpc_err(cpc, "cpc_bind_pctx", subcode);

	return (ret);
}
474 
/*
 * Bind the set to the physical CPU `id'.  The calling LWP is first
 * bound to that processor (the previous binding is saved in
 * set->cs_obind and restored on every failure path), then the set is
 * handed to the cpc driver through an ioctl on CPUDRV_SHARED.  Returns
 * 0 on success, or -1 with errno set.
 */
/*ARGSUSED*/
int
cpc_bind_cpu(cpc_t *cpc, processorid_t id, cpc_set_t *set, uint_t flags)
{
	int		fd;
	char		*packed_set;
	size_t		packsize;
	__cpc_args_t	cpc_args;
	int		error;
	const char	*fn = "cpc_bind_cpu";
	int		subcode = -1;

	/*
	 * cpc_bind_cpu() currently has no valid flags.
	 */
	if (flags != 0 || cpc_set_valid(cpc, set) != 0 || set->cs_nreqs <= 0) {
		errno = EINVAL;
		return (-1);
	}

	/* Pin this LWP to the target CPU, remembering the old binding. */
	if (processor_bind(P_LWPID, P_MYID, id, &set->cs_obind) == -1) {
		cpc_err(cpc, fn, CPC_PBIND_FAILED);
		return (-1);
	}

	if ((fd = open(CPUDRV_SHARED, O_RDWR)) < 0) {
		/* Preserve open()'s errno across the bind restoration. */
		error = errno;
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		errno = error;
		return (-1);
	}

	/*
	 * To avoid leaking file descriptors, if we find an existing fd here we
	 * just close it. This is only a problem if a user attempts to bind the
	 * same set to different CPUs without first unbinding it.
	 */
	if (set->cs_fd != -1)
		(void) close(set->cs_fd);
	set->cs_fd = fd;

	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		(void) close(fd);
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		errno = ENOMEM;
		return (-1);
	}

	cpc_args.udata1 = packed_set;
	cpc_args.udata2 = (void *)packsize;
	cpc_args.udata3 = (void *)&subcode;

	if (ioctl(fd, CPCIO_BIND, &cpc_args) != 0) {
		/* Save errno before the cleanup calls can clobber it. */
		error = errno;
		free(packed_set);
		(void) close(fd);
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		if (subcode != -1)
			cpc_err(cpc, fn, subcode);
		errno = error;
		return (-1);
	}

	free(packed_set);

	set->cs_thr = thr_self();
	set->cs_state = CS_BOUND_CPU;

	return (0);
}
545 
546 /*ARGSUSED*/
547 int
548 cpc_request_preset(cpc_t *cpc, int index, uint64_t preset)
549 {
550 	return (syscall(SYS_cpc, CPC_PRESET, -1, index,
551 	    (uint32_t)(preset >> 32), (uint32_t)(preset & MASK32)));
552 }
553 
554 /*ARGSUSED*/
555 int
556 cpc_set_restart(cpc_t *cpc, cpc_set_t *set)
557 {
558 	return (syscall(SYS_cpc, CPC_RESTART, -1, 0, 0, 0));
559 }
560 
/*
 * Release the hardware associated with a bound set, using whichever
 * mechanism matches how it was bound (current LWP syscall, CPU-driver
 * ioctl, or libpctx for another process), then mark the set
 * CS_UNBOUND.  Returns the result of the release operation, with errno
 * restored from that operation on failure.
 */
/*ARGSUSED*/
int
cpc_unbind(cpc_t *cpc, cpc_set_t *set)
{
	int		ret = 0;
	int		error;

	if (cpc_set_valid(cpc, set) != 0) {
		errno = EINVAL;
		return (-1);
	}

	switch (set->cs_state) {
	case CS_UNBOUND:
		errno = EINVAL;
		return (-1);
	case CS_BOUND_CURLWP:
		ret = syscall(SYS_cpc, CPC_RELE, -1, 0, 0, 0);
		error = errno;
		break;
	case CS_BOUND_CPU:
		/* Release the driver, then restore the old CPU binding. */
		ret = ioctl(set->cs_fd, CPCIO_RELE, NULL);
		error = errno;
		(void) close(set->cs_fd);
		set->cs_fd = -1;
		(void) processor_bind(P_LWPID, P_MYID, set->cs_obind, NULL);
		break;
	case CS_BOUND_PCTX:
		/* cs_pctx may have been NULLed by cpc_invalidate_pctx(). */
		if (set->cs_pctx != NULL) {
			ret = __pctx_cpc(set->cs_pctx, cpc, CPC_RELE,
			    set->cs_id, 0, 0, 0, 0);
			error = errno;
		}
		break;
	}

	set->cs_thr = NULL;
	set->cs_id = -1;
	set->cs_state = CS_UNBOUND;
	/* error is only consumed when the same arm that set ret filled it. */
	if (ret != 0)
		errno = error;
	return (ret);
}
604 
/*
 * Sample a bound set into `buf' (counter values, hrtime and tick),
 * using the mechanism appropriate to how the set was bound.  The
 * buffer must be sized for this set.  Returns 0 on success, -1 with
 * errno set on failure.
 */
/*ARGSUSED*/
int
cpc_set_sample(cpc_t *cpc, cpc_set_t *set, cpc_buf_t *buf)
{
	__cpc_args_t args;

	/*
	 * The following check ensures that only the most recently bound set
	 * can be sampled, as binding a set invalidates all other sets in the
	 * cpc_t.
	 */
	if (set->cs_state == CS_UNBOUND ||
	    buf->cb_size != set->cs_nreqs * sizeof (uint64_t)) {
		errno = EINVAL;
		return (-1);
	}

	switch (set->cs_state) {
	case CS_BOUND_CURLWP:
		return (syscall(SYS_cpc, CPC_SAMPLE, -1, buf->cb_data,
		    &buf->cb_hrtime, &buf->cb_tick));
	case CS_BOUND_CPU:
		args.udata1 = buf->cb_data;
		args.udata2 = &buf->cb_hrtime;
		args.udata3 = &buf->cb_tick;
		return (ioctl(set->cs_fd, CPCIO_SAMPLE, &args));
	case CS_BOUND_PCTX:
		return (__pctx_cpc(set->cs_pctx, cpc, CPC_SAMPLE, set->cs_id,
		    buf->cb_data, &buf->cb_hrtime, &buf->cb_tick,
		    buf->cb_size));
	}

	/* Not reached with a valid cs_state. */
	errno = EINVAL;
	return (-1);
}
640 
641 /*ARGSUSED*/
642 void
643 cpc_buf_sub(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
644 {
645 	int i;
646 
647 	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
648 		return;
649 
650 	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
651 	    a->cb_hrtime : b->cb_hrtime;
652 	ds->cb_tick = a->cb_tick - b->cb_tick;
653 
654 	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
655 		ds->cb_data[i] = a->cb_data[i] - b->cb_data[i];
656 }
657 
658 /*ARGSUSED*/
659 void
660 cpc_buf_add(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *a, cpc_buf_t *b)
661 {
662 	int i;
663 
664 	if (a->cb_size != ds->cb_size || b->cb_size != ds->cb_size)
665 		return;
666 
667 	ds->cb_hrtime = (a->cb_hrtime > b->cb_hrtime) ?
668 	    a->cb_hrtime : b->cb_hrtime;
669 	ds->cb_tick = a->cb_tick + b->cb_tick;
670 
671 	for (i = 0; i < ds->cb_size / sizeof (uint64_t); i++)
672 		ds->cb_data[i] = a->cb_data[i] + b->cb_data[i];
673 }
674 
675 /*ARGSUSED*/
676 void
677 cpc_buf_copy(cpc_t *cpc, cpc_buf_t *ds, cpc_buf_t *src)
678 {
679 	if (ds->cb_size != src->cb_size)
680 		return;
681 
682 	bcopy(src->cb_data, ds->cb_data, ds->cb_size);
683 	ds->cb_hrtime = src->cb_hrtime;
684 	ds->cb_tick = src->cb_tick;
685 }
686 
687 /*ARGSUSED*/
688 void
689 cpc_buf_zero(cpc_t *cpc, cpc_buf_t *buf)
690 {
691 	bzero(buf->cb_data, buf->cb_size);
692 	buf->cb_hrtime = 0;
693 	buf->cb_tick = 0;
694 }
695 
696 /*
697  * Gets or sets the value of the request specified by index.
698  */
699 /*ARGSUSED*/
700 int
701 cpc_buf_get(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t *val)
702 {
703 	*val = buf->cb_data[index];
704 
705 	return (0);
706 }
707 
708 /*ARGSUSED*/
709 int
710 cpc_buf_set(cpc_t *cpc, cpc_buf_t *buf, int index, uint64_t val)
711 {
712 	buf->cb_data[index] = val;
713 
714 	return (0);
715 }
716 
/*
 * Return the high-resolution timestamp recorded in the buffer when it
 * was last sampled (see cpc_set_sample()).
 */
/*ARGSUSED*/
hrtime_t
cpc_buf_hrtime(cpc_t *cpc, cpc_buf_t *buf)
{
	return (buf->cb_hrtime);
}
723 
/*
 * Return the tick count recorded in the buffer when it was last
 * sampled (see cpc_set_sample()).
 */
/*ARGSUSED*/
uint64_t
cpc_buf_tick(cpc_t *cpc, cpc_buf_t *buf)
{
	return (buf->cb_tick);
}
730 
731 static char *
732 cpc_get_list(int which, int arg)
733 {
734 	int	szcmd;
735 	int	size;
736 	char	*list;
737 
738 	if (which == CPC_LIST_ATTRS)
739 		szcmd = CPC_ATTRLIST_SIZE;
740 	else
741 		szcmd = CPC_EVLIST_SIZE;
742 
743 	if (syscall(SYS_cpc, szcmd, -1, &size, arg, 0) != 0)
744 		return (NULL);
745 
746 	if ((list = malloc(size)) == NULL)
747 		return (NULL);
748 
749 	if (syscall(SYS_cpc, which, -1, list, arg, 0) != 0) {
750 		free(list);
751 		return (NULL);
752 	}
753 
754 	return (list);
755 }
756 
757 /*ARGSUSED*/
758 void
759 cpc_walk_requests(cpc_t *cpc, cpc_set_t *set, void *arg,
760     void (*action)(void *arg, int index, const char *event, uint64_t preset,
761 	uint_t flags, int nattrs, const cpc_attr_t *attrs))
762 {
763 	cpc_request_t	*rp;
764 	cpc_attr_t	*attrs = NULL;
765 	int		i;
766 
767 	for (rp = set->cs_request; rp != NULL; rp = rp->cr_next) {
768 		/*
769 		 * Need to reconstruct a temporary cpc_attr_t array for req.
770 		 */
771 		if (rp->cr_nattrs != 0)
772 			if ((attrs = malloc(rp->cr_nattrs *
773 			    sizeof (cpc_attr_t))) == NULL)
774 				return;
775 		for (i = 0; i < rp->cr_nattrs; i++) {
776 			attrs[i].ca_name = rp->cr_attr[i].ka_name;
777 			attrs[i].ca_val = rp->cr_attr[i].ka_val;
778 		}
779 
780 		action(arg, rp->cr_index, rp->cr_event, rp->cr_preset,
781 		    rp->cr_flags, rp->cr_nattrs, attrs);
782 
783 		if (rp->cr_nattrs != 0)
784 			free(attrs);
785 	}
786 }
787 
788 /*ARGSUSED*/
789 void
790 cpc_walk_events_all(cpc_t *cpc, void *arg,
791     void (*action)(void *arg, const char *event))
792 {
793 	char		**list;
794 	char		*p, *e;
795 	int		i;
796 	int		ncounters = cpc_npic(cpc);
797 	cpc_strhash_t	*hash;
798 
799 	if ((list = malloc(ncounters * sizeof (char *))) == NULL)
800 		return;
801 
802 	if ((hash = __cpc_strhash_alloc()) == NULL) {
803 		free(list);
804 		return;
805 	}
806 
807 	for (i = 0; i < ncounters; i++) {
808 		if ((list[i] = strdup(cpc->cpc_evlist[i])) == NULL)
809 			goto err;
810 		p = list[i];
811 		while ((e = strchr(p, ',')) != NULL) {
812 			*e = '\0';
813 			if (__cpc_strhash_add(hash, p) == -1)
814 				goto err;
815 			p = e + 1;
816 		}
817 		if (__cpc_strhash_add(hash, p) == -1)
818 			goto err;
819 	}
820 
821 	while ((p = __cpc_strhash_next(hash)) != NULL)
822 		action(arg, p);
823 
824 err:
825 	__cpc_strhash_free(hash);
826 	for (i = 0; i < ncounters; i++)
827 		free(list[i]);
828 	free(list);
829 }
830 
831 /*ARGSUSED*/
832 void
833 cpc_walk_events_pic(cpc_t *cpc, uint_t picno, void *arg,
834     void (*action)(void *arg, uint_t picno, const char *event))
835 {
836 	char	*p;
837 	char	*e;
838 	char	*list;
839 
840 	if (picno >= cpc->cpc_npic) {
841 		errno = EINVAL;
842 		return;
843 	}
844 
845 	if ((list = strdup(cpc->cpc_evlist[picno])) == NULL)
846 		return;
847 
848 	/*
849 	 * List now points to a comma-separated list of events supported by
850 	 * the designated pic.
851 	 */
852 	p = list;
853 	while ((e = strchr(p, ',')) != NULL) {
854 		*e = '\0';
855 		action(arg, picno, p);
856 		p = e + 1;
857 	}
858 	action(arg, picno, p);
859 
860 	free(list);
861 }
862 
863 /*ARGSUSED*/
864 void
865 cpc_walk_attrs(cpc_t *cpc, void *arg,
866     void (*action)(void *arg, const char *attr))
867 {
868 	char	*p;
869 	char	*e;
870 	char	*list;
871 
872 	if ((list = strdup(cpc->cpc_attrlist)) == NULL)
873 		return;
874 
875 	/*
876 	 * Platforms with no attributes will return an empty string.
877 	 */
878 	if (*list == '\0')
879 		return;
880 
881 	/*
882 	 * List now points to a comma-separated list of attributes supported by
883 	 * the underlying platform.
884 	 */
885 	p = list;
886 	while ((e = strchr(p, ',')) != NULL) {
887 		*e = '\0';
888 		action(arg, p);
889 		p = e + 1;
890 	}
891 	action(arg, p);
892 
893 	free(list);
894 }
895 
/*
 * Ask the kernel to enable the counters via the CPC_ENABLE syscall;
 * returns the syscall result (0, or -1 with errno set).
 */
/*ARGSUSED*/
int
cpc_enable(cpc_t *cpc)
{
	return (syscall(SYS_cpc, CPC_ENABLE, -1, 0, 0, 0));
}
902 
/*
 * Ask the kernel to disable the counters via the CPC_DISABLE syscall;
 * returns the syscall result (0, or -1 with errno set).
 */
/*ARGSUSED*/
int
cpc_disable(cpc_t *cpc)
{
	return (syscall(SYS_cpc, CPC_DISABLE, -1, 0, 0, 0));
}
909 
/*
 * Return the number of performance counters (pics), cached in the
 * handle at cpc_open() time via CPC_NPIC.
 */
/*ARGSUSED*/
uint_t
cpc_npic(cpc_t *cpc)
{
	return (cpc->cpc_npic);
}
916 
/*
 * Return the capability flags cached in the handle at cpc_open() time
 * via CPC_CAPS.
 */
/*ARGSUSED*/
uint_t
cpc_caps(cpc_t *cpc)
{
	return (cpc->cpc_caps);
}
923 
/*
 * Return the counter implementation name string cached at cpc_open()
 * time via CPC_IMPL_NAME.
 */
const char *
cpc_cciname(cpc_t *cpc)
{
	return (cpc->cpc_cciname);
}
929 
/*
 * Return the CPU reference string cached at cpc_open() time via
 * CPC_CPUREF.
 */
const char *
cpc_cpuref(cpc_t *cpc)
{
	return (cpc->cpc_cpuref);
}
935 
/*
 * Register a per-handle error callback invoked by cpc_err(); passing
 * NULL reverts to the default behavior (messages printed to stderr).
 * Always returns 0.
 */
int
cpc_seterrhndlr(cpc_t *cpc, cpc_errhndlr_t *fn)
{
	cpc->cpc_errfn = fn;
	return (0);
}
942 
/*
 * These strings may contain printf() conversion specifiers.
 * The table is indexed by error subcode; slot 0 is a filler so that
 * valid subcodes begin at 1.  Order must match the subcode constants
 * named in the comments below.
 */
static const char *errstr[] = {
"",						/* zero slot filler */
"Unknown event\n",				/* CPC_INVALID_EVENT */
"Invalid counter number\n",			/* CPC_INVALID_PICNUM */
"Unknown attribute\n",				/* CPC_INVALID_ATTRIBUTE */
"Attribute out of range\n",			/* CPC_ATTRIBUTE_OUT_OF_RANGE */
"Hardware resource unavailable\n",		/* CPC_RESOURCE_UNAVAIL */
"Counter cannot count requested event\n",	/* CPC_PIC_NOT_CAPABLE */
"Invalid flags in a request\n",			/* CPC_REQ_INVALID_FLAGS */
"Requests conflict with each other\n",		/* CPC_CONFLICTING_REQS */
"Attribute requires the cpc_cpu privilege\n",  /* CPC_ATTR_REQUIRES_PRIVILEGE */
"Couldn't bind LWP to requested processor\n"	/* CPC_PBIND_FAILED */
};
959 
960 /*VARARGS3*/
961 static void
962 cpc_err(cpc_t *cpc, const char *fn, int subcode, ...)
963 {
964 	va_list		ap;
965 	const char	*str;
966 	int		error;
967 
968 	/*
969 	 * If subcode is -1, there is no specific description for this error.
970 	 */
971 	if (subcode == -1)
972 		return;
973 
974 	/*
975 	 * We need to preserve errno across calls to this function to prevent it
976 	 * from being clobbered while here, or in the user's error handler.
977 	 */
978 	error = errno;
979 
980 	str = dgettext(TEXT_DOMAIN, errstr[subcode]);
981 
982 	va_start(ap, subcode);
983 	if (cpc->cpc_errfn != NULL)
984 		cpc->cpc_errfn(fn, subcode, str, ap);
985 	else {
986 		/*
987 		 * If printf() conversion specifiers are added to the errstr[]
988 		 * table, this call needs to be changed to vfprintf().
989 		 */
990 		(void) fprintf(stderr, "libcpc: %s: %s", fn, str);
991 	}
992 	va_end(ap);
993 
994 	errno = error;
995 }
996 
997 /*
998  * Hook used by libpctx to alert libcpc when a pctx handle is going away.
999  * This is necessary to prevent libcpc from attempting a libpctx operation on a
1000  * stale and invalid pctx_t handle. Since pctx_t's are cached by libcpc, we need
1001  * to be notified when they go away.
1002  */
1003 static void
1004 cpc_invalidate_pctx(cpc_t *cpc, pctx_t *pctx)
1005 {
1006 	cpc_set_t	*set;
1007 	int		sigblocked;
1008 
1009 	sigblocked = cpc_lock(cpc);
1010 	for (set = cpc->cpc_sets; set != NULL; set = set->cs_next)
1011 		if (set->cs_pctx == pctx)
1012 			set->cs_pctx = NULL;
1013 	cpc_unlock(cpc, sigblocked);
1014 }
1015 
1016 /*
1017  * Check that the set is valid; if so it will be in the cpc handle's
1018  * list of sets. The lock protects the list of sets, but not the set
1019  * itself.
1020  */
1021 static int
1022 cpc_set_valid(cpc_t *cpc, cpc_set_t *set)
1023 {
1024 	cpc_set_t	*csp;
1025 	int		sigblocked;
1026 
1027 	sigblocked = cpc_lock(cpc);
1028 	for (csp = cpc->cpc_sets; csp != NULL; csp = csp->cs_next)
1029 		if (csp == set)
1030 			break;
1031 	cpc_unlock(cpc, sigblocked);
1032 	if (csp == NULL)
1033 		return (-1);
1034 	return (0);
1035 }
1036 
/*
 * Acquire the handle's list lock, holding SIGEMT first so a signal
 * handler that calls back into libcpc cannot deadlock against a lock
 * holder.  Returns nonzero if SIGEMT was already held by the caller,
 * so cpc_unlock() knows whether it is safe to release the signal.
 */
static int
cpc_lock(cpc_t *cpc)
{
	int ret = (sigset(SIGEMT, SIG_HOLD) == SIG_HOLD);
	(void) mutex_lock(&cpc->cpc_lock);
	return (ret);
}
1044 
/*
 * Drop the handle's list lock and, if SIGEMT was not already held when
 * cpc_lock() was called (sigblocked == 0), release it again.
 */
static void
cpc_unlock(cpc_t *cpc, int sigblocked)
{
	(void) mutex_unlock(&cpc->cpc_lock);
	if (sigblocked == 0)
		(void) sigrelse(SIGEMT);
}
1052 
/*
 * Scratch state shared with the walker callbacks below: `name' is the
 * event or attribute being searched for; `found' is set by the
 * callback when a match is seen.
 */
struct priv {
	const char *name;	/* string to match */
	int found;		/* set to 1 by the walker on a match */
};
1057 
1058 /*ARGSUSED*/
1059 static void
1060 ev_walker(void *arg, uint_t picno, const char *ev)
1061 {
1062 	if (strcmp(((struct priv *)arg)->name, ev) == 0)
1063 		((struct priv *)arg)->found = 1;
1064 }
1065 
1066 static void
1067 at_walker(void *arg, const char *at)
1068 {
1069 	if (strcmp(((struct priv *)arg)->name, at) == 0)
1070 		((struct priv *)arg)->found = 1;
1071 }
1072 
1073 static int
1074 cpc_valid_event(cpc_t *cpc, uint_t pic, const char *ev)
1075 {
1076 	struct priv pr = { NULL, 0 };
1077 
1078 	pr.name = ev;
1079 	cpc_walk_events_pic(cpc, pic, &pr, ev_walker);
1080 	return (pr.found);
1081 }
1082 
1083 static int
1084 cpc_valid_attr(cpc_t *cpc, char *attr)
1085 {
1086 	struct priv pr = { NULL, 0 };
1087 
1088 	pr.name = attr;
1089 	cpc_walk_attrs(cpc, &pr, at_walker);
1090 	return (pr.found);
1091 }
1092