/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * The SNMP library prepares PDUs and communicates with the SNMP agent
 * on the SP (service processor) side via the ds_snmp driver.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <thread.h>
#include <synch.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#ifdef USE_SOCKETS
/* needed by the USE_SOCKETS code paths below */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#endif
#include <libnvpair.h>
#include <sys/ds_snmp.h>

#include "libpiclsnmp.h"
#include "snmplib.h"
#include "asn1.h"
#include "pdu.h"
#include "debug.h"

#pragma init(libpiclsnmp_init)		/* need this in .init */

/*
 * Data from the MIB is fetched based on the hints about object
 * groups received from (possibly many threads in) the application.
 * However, the fetched data is kept in a common cache for use across
 * all threads, so even a GETBULK is issued only when absolutely
 * necessary.
 *
 * Note that locking is not fine grained (there's no locking per row)
 * since we don't expect too many MT consumers right away.
 */
static mutex_t	mibcache_lock;
static nvlist_t	**mibcache = NULL;
static uint_t	n_mibcache_rows = 0;

static mutex_t	snmp_reqid_lock;
static int	snmp_reqid = 1;

#ifdef SNMP_DEBUG
uint_t	snmp_nsends = 0;
uint_t	snmp_sentbytes = 0;
uint_t	snmp_nrecvs = 0;
uint_t	snmp_rcvdbytes = 0;
#endif

#ifdef USE_SOCKETS
#define	SNMP_DEFAULT_PORT	161
#define	SNMP_MAX_RECV_PKTSZ	(64 * 1024)
#endif

/*
 * We need a reliably monotonic and stable source of time values to age
 * entries in the mibcache toward expiration. The code originally used
 * gettimeofday(), but since that is subject to time-of-day changes made by
 * the administrator, the values it returns do not satisfy our needs.
 * Instead, we use gethrtime(), which is immune to time-of-day changes.
 * However, since gethrtime() returns a signed 64-bit value in units of
 * nanoseconds and we are using signed 32-bit timestamps, we always divide
 * the result by (HRTIME_SCALE * NANOSEC) to scale it down into units of 10
 * seconds.
 *
 * Note that the scaling factor means that the value of MAX_INCACHE_TIME
 * from snmplib.h should also be in units of 10 seconds.
 */
#define	GET_SCALED_HRTIME()	(int)(gethrtime() / (HRTIME_SCALE * NANOSEC))
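
/*
 * For example, assuming HRTIME_SCALE is 10 (see snmplib.h), a gethrtime()
 * value of 1234567890123 ns (~1234.5 seconds past some arbitrary origin)
 * scales down to a timestamp of 123.
 */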

/*
 * The mibcache code originally cached values for 300 seconds after fetching
 * data via SNMP. Subsequent reads within that 300-second window would come
 * from the cache - which is quite a bit faster than an SNMP query - but the
 * first request that came in more than 300 seconds after the previous SNMP
 * query would trigger a new SNMP query. This worked well as an
 * optimization for frequent queries, but when data was queried less
 * frequently than once every 300 seconds (as proved to be the case at
 * multiple customer sites), the cache didn't help at all.
 *
 * To improve the performance of infrequent queries, code was added to the
 * library to allow a client (i.e. a thread in the picl plugin) to proactively
 * refresh cache entries without waiting for them to expire, thereby ensuring
 * that all volatile entries in the cache at any given time are less than 300
 * seconds old. Whenever an SNMP query is generated to retrieve volatile data
 * that will be cached, an entry is added to a refresh queue that tracks the
 * parameters of the query and the time that it was made. A client can query
 * the age of the oldest item in the refresh queue and - at its discretion -
 * can then force that query to be repeated in a manner that will update the
 * mibcache entry even though it hasn't expired.
 */
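
/*
 * A minimal sketch (assumptions only; the real driver of this machinery
 * lives in the picl plugin) of how a client might use the refresh
 * interfaces defined at the bottom of this file:
 *
 *	if (snmp_refresh_init() == 0) {
 *		for (;;) {
 *			int i, jobs;
 *
 *			jobs = snmp_refresh_get_cycle_hint(cycle_secs);
 *			for (i = 0; i < jobs; i++)
 *				(void) snmp_refresh_process_job();
 *			(void) sleep(cycle_secs);
 *		}
 *	}
 */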
typedef struct {
	struct picl_snmphdl	*smd;
	char			*oidstrs;
	int			n_oids;
	int			row;
	int			last_fetch_time;	/* in scaled hrtime */
} refreshq_job_t;

static mutex_t		refreshq_lock;
static refreshq_job_t	*refreshq = NULL;
static uint_t		n_refreshq_slots = 0;	/* # of alloc'ed job slots */
static uint_t		n_refreshq_jobs = 0;	/* # of unprocessed jobs */
static uint_t		refreshq_next_job = 0;	/* oldest unprocessed job */
static uint_t		refreshq_next_slot = 0;	/* next available job slot */


/*
 * Static function declarations
 */
static void	libpiclsnmp_init(void);

static int	lookup_int(char *, int, int *, int);
static int	lookup_str(char *, int, char **, int);
static int	lookup_bitstr(char *, int, uchar_t **, uint_t *, int);

static oidgroup_t *locate_oid_group(struct picl_snmphdl *, char *);
static int	search_oid_in_group(char *, char *, int);

static snmp_pdu_t *fetch_single(struct picl_snmphdl *, char *, int, int *);
static snmp_pdu_t *fetch_next(struct picl_snmphdl *, char *, int, int *);
static void	fetch_bulk(struct picl_snmphdl *, char *, int, int, int, int *);
static int	fetch_single_str(struct picl_snmphdl *, char *, int,
		    char **, int *);
static int	fetch_single_int(struct picl_snmphdl *, char *, int,
		    int *, int *);
static int	fetch_single_bitstr(struct picl_snmphdl *, char *, int,
		    uchar_t **, uint_t *, int *);

static int	snmp_send_request(struct picl_snmphdl *, snmp_pdu_t *, int *);
static int	snmp_recv_reply(struct picl_snmphdl *, snmp_pdu_t *, int *);

static int	mibcache_realloc(int);
static void	mibcache_populate(snmp_pdu_t *, int);
static char	*oid_to_oidstr(oid *, size_t);

static int	refreshq_realloc(int);
static int	refreshq_add_job(struct picl_snmphdl *, char *, int, int);


static void
libpiclsnmp_init(void)
{
	(void) mutex_init(&mibcache_lock, USYNC_THREAD, NULL);
	if (mibcache_realloc(0) < 0)
		(void) mutex_destroy(&mibcache_lock);

	(void) mutex_init(&refreshq_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&snmp_reqid_lock, USYNC_THREAD, NULL);

	LOGINIT();
}

picl_snmphdl_t
snmp_init()
{
	struct picl_snmphdl	*smd;
#ifdef USE_SOCKETS
	int	sbuf = (1 << 15);	/* 32K */
	int	rbuf = (1 << 17);	/* 128K */
	char	*snmp_agent_addr;
#endif

	smd = (struct picl_snmphdl *)calloc(1, sizeof (struct picl_snmphdl));
	if (smd == NULL)
		return (NULL);

#ifdef USE_SOCKETS
	if ((snmp_agent_addr = getenv("SNMP_AGENT_IPADDR")) == NULL) {
		free(smd);
		return (NULL);
	}

	if ((smd->fd = socket(PF_INET, SOCK_DGRAM, 0)) < 0) {
		free(smd);
		return (NULL);
	}

	(void) setsockopt(smd->fd, SOL_SOCKET, SO_SNDBUF, &sbuf, sizeof (int));
	(void) setsockopt(smd->fd, SOL_SOCKET, SO_RCVBUF, &rbuf, sizeof (int));

	(void) memset(&smd->agent_addr, 0, sizeof (struct sockaddr_in));
	smd->agent_addr.sin_family = AF_INET;
	smd->agent_addr.sin_port = htons(SNMP_DEFAULT_PORT);
	smd->agent_addr.sin_addr.s_addr = inet_addr(snmp_agent_addr);
#else
	smd->fd = open(DS_SNMP_DRIVER, O_RDWR);
	if (smd->fd < 0) {
		free(smd);
		return (NULL);
	}
#endif

	return ((picl_snmphdl_t)smd);
}

void
snmp_fini(picl_snmphdl_t hdl)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;

	if (smd) {
		if (smd->fd >= 0) {
			(void) close(smd->fd);
		}
		free(smd);
	}
}

int
snmp_reinit(picl_snmphdl_t hdl, int clr_linkreset)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	nvlist_t	*nvl;
	int	i;

	(void) mutex_lock(&mibcache_lock);

	for (i = 0; i < n_mibcache_rows; i++) {
		if ((nvl = mibcache[i]) != NULL)
			nvlist_free(nvl);
	}

	n_mibcache_rows = 0;
	if (mibcache) {
		free(mibcache);
		mibcache = NULL;
	}

	(void) mutex_unlock(&mibcache_lock);

	if (clr_linkreset) {
		if (smd == NULL || smd->fd < 0)
			return (-1);
		else
			return (ioctl(smd->fd, DSSNMP_CLRLNKRESET, NULL));
	}

	return (0);
}

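/*
 * snmp_register_group() registers a group of objects whose values should
 * be fetched and cached together via a single GETBULK. The oidstrs
 * argument is a packed buffer of n_oids consecutive NUL-terminated OID
 * prefix strings, e.g. (OID values below are illustrative only):
 *
 *	"1.3.6.1.4.1.42.2.1\0" "1.3.6.1.4.1.42.2.2\0"
 *
 * with n_oids == 2. Groups containing volatile objects should be
 * registered with is_vol set to a nonzero value.
 */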
void
snmp_register_group(picl_snmphdl_t hdl, char *oidstrs, int n_oids, int is_vol)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*oidg;
	oidgroup_t	*curr, *prev;
	char		*p;
	int		i, sz;

	/*
	 * Allocate a new oidgroup_t
	 */
	oidg = (oidgroup_t *)calloc(1, sizeof (struct oidgroup));
	if (oidg == NULL)
		return;

	/*
	 * Determine how much space is required to register this group
	 */
	sz = 0;
	p = oidstrs;
	for (i = 0; i < n_oids; i++) {
		sz += strlen(p) + 1;
		p = oidstrs + sz;
	}

	/*
	 * Create this oid group
	 */
	if ((p = (char *)malloc(sz)) == NULL) {
		free((void *) oidg);
		return;
	}

	(void) memcpy(p, oidstrs, sz);

	oidg->next = NULL;
	oidg->oidstrs = p;
	oidg->n_oids = n_oids;
	oidg->is_volatile = is_vol;

	/*
	 * Link it to the tail of the list of oid groups
	 */
	for (prev = NULL, curr = smd->group; curr; curr = curr->next)
		prev = curr;

	if (prev == NULL)
		smd->group = oidg;
	else
		prev->next = oidg;
}

/*
 * snmp_get_int() takes in an OID and returns the integer value
 * of the object referenced in the passed arg. It returns 0 on
 * success and -1 on failure.
 */
int
snmp_get_int(picl_snmphdl_t hdl, char *prefix, int row, int *val,
    int *snmp_syserr)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*grp;
	int	ret;
	int	err = 0;

	if (smd == NULL || prefix == NULL || val == NULL)
		return (-1);

	/*
	 * If this item should not be cached, fetch it directly from
	 * the agent using fetch_single_xxx()
	 */
	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
		ret = fetch_single_int(smd, prefix, row, val, &err);

		if (snmp_syserr)
			*snmp_syserr = err;

		return (ret);
	}

	/*
	 * Is it in the cache?
	 */
	if (lookup_int(prefix, row, val, grp->is_volatile) == 0)
		return (0);

	/*
	 * Fetch it from the agent and populate the cache
	 */
	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
	if (snmp_syserr)
		*snmp_syserr = err;

	/*
	 * Look it up again and return it
	 */
	if (lookup_int(prefix, row, val, grp->is_volatile) < 0)
		return (-1);

	return (0);
}
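
/*
 * Example (sketch only; the OID below is illustrative):
 *
 *	int	val, err;
 *
 *	if (snmp_get_int(hdl, "1.3.6.1.4.1.42.2.1", row, &val, &err) == 0)
 *		... use val ...
 */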

/*
 * snmp_get_str() takes in an OID and returns the string value
 * of the object referenced in the passed arg. Memory for the string
 * is allocated within snmp_get_str() and is expected to be freed by
 * the caller when it is no longer needed. The function returns 0
 * on success and -1 on failure.
 */
int
snmp_get_str(picl_snmphdl_t hdl, char *prefix, int row, char **strp,
    int *snmp_syserr)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*grp;
	char	*val;
	int	ret;
	int	err = 0;

	if (smd == NULL || prefix == NULL || strp == NULL)
		return (-1);

	*strp = NULL;
	/*
	 * Check if this item is cacheable or not. If not, call
	 * fetch_single_* to get it directly from the agent
	 */
	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
		ret = fetch_single_str(smd, prefix, row, strp, &err);

		if (snmp_syserr)
			*snmp_syserr = err;

		return (ret);
	}

	/*
	 * See if it's in the cache already
	 */
	if (lookup_str(prefix, row, &val, grp->is_volatile) == 0) {
		if ((*strp = strdup(val)) == NULL)
			return (-1);
		else
			return (0);
	}

	/*
	 * Fetch it from the agent and populate cache
	 */
	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
	if (snmp_syserr)
		*snmp_syserr = err;

	/*
	 * Retry lookup
	 */
	if (lookup_str(prefix, row, &val, grp->is_volatile) < 0)
		return (-1);

	if ((*strp = strdup(val)) == NULL)
		return (-1);
	else
		return (0);
}
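
/*
 * Example (sketch only; the OID below is illustrative). The returned
 * string must be freed by the caller:
 *
 *	char	*str;
 *
 *	if (snmp_get_str(hdl, "1.3.6.1.4.1.42.2.2", row, &str, NULL) == 0) {
 *		... use str ...
 *		free(str);
 *	}
 */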

/*
 * snmp_get_bitstr() takes in an OID and returns the bit string value
 * of the object referenced in the passed args. Memory for the bitstring
 * is allocated within the function and is expected to be freed by
 * the caller when it is no longer needed. The function returns 0
 * on success and -1 on failure.
 */
int
snmp_get_bitstr(picl_snmphdl_t hdl, char *prefix, int row, uchar_t **bitstrp,
    uint_t *nbytes, int *snmp_syserr)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*grp;
	uchar_t	*val;
	int	ret;
	int	err = 0;

	if (smd == NULL || prefix == NULL || bitstrp == NULL || nbytes == NULL)
		return (-1);

	*bitstrp = NULL;
	/*
	 * Check if this item is cacheable or not. If not, call
	 * fetch_single_* to get it directly from the agent
	 */
	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
		ret = fetch_single_bitstr(smd, prefix, row, bitstrp,
		    nbytes, &err);

		if (snmp_syserr)
			*snmp_syserr = err;

		return (ret);
	}

	/*
	 * See if it's in the cache already
	 */
	if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) == 0) {
		if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
			return (-1);
		(void) memcpy(*bitstrp, (const void *)val, *nbytes);
		return (0);
	}

	/*
	 * Fetch it from the agent and populate cache
	 */
	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
	if (snmp_syserr)
		*snmp_syserr = err;

	/*
	 * Retry lookup
	 */
	if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) < 0)
		return (-1);

	if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
		return (-1);
	(void) memcpy(*bitstrp, (const void *)val, *nbytes);

	return (0);
}

/*
 * snmp_get_nextrow() is similar in operation to SNMP_GETNEXT, but
 * only just. In particular, this is only expected to return the next
 * valid row number for the same object, not its value. Since we don't
 * have any other means, we use this to determine the number of rows
 * in the table (and the valid ones). This function returns 0 on success
 * and -1 on failure.
 */
int
snmp_get_nextrow(picl_snmphdl_t hdl, char *prefix, int row, int *nextrow,
    int *snmp_syserr)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	snmp_pdu_t	*reply_pdu;
	pdu_varlist_t	*vp;
	char	*nxt_oidstr;
	int	err = 0;

	if (smd == NULL || prefix == NULL || nextrow == NULL) {
		if (snmp_syserr)
			*snmp_syserr = EINVAL;
		return (-1);
	}

	/*
	 * The get_nextrow results should *never* go into any cache,
	 * since these relationships are dynamically discovered each time.
	 */
	if ((reply_pdu = fetch_next(smd, prefix, row, &err)) == NULL) {
		if (snmp_syserr)
			*snmp_syserr = err;
		return (-1);
	}

	/*
	 * We are not concerned about the "value" of the lexicographically
	 * next object; we only care about the name of that object and
	 * its row number (and whether such an object exists or not).
	 */
	vp = reply_pdu->vars;

	/*
	 * This indicates that we're at the end of the MIB view.
	 */
	if (vp == NULL || vp->name == NULL || vp->type == SNMP_NOSUCHOBJECT ||
	    vp->type == SNMP_NOSUCHINSTANCE || vp->type == SNMP_ENDOFMIBVIEW) {
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOSPC;
		return (-1);
	}

	/*
	 * need to be able to convert the OID
	 */
	if ((nxt_oidstr = oid_to_oidstr(vp->name, vp->name_len - 1)) == NULL) {
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOMEM;
		return (-1);
	}

	/*
	 * We're on to the next table.
	 */
	if (strcmp(nxt_oidstr, prefix) != 0) {
		free(nxt_oidstr);
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOENT;
		return (-1);
	}

	/*
	 * Ok, so we've got an oid that's simply the next valid row of the
	 * passed on object, return this row number.
	 */
	*nextrow = (vp->name)[vp->name_len-1];

	free(nxt_oidstr);
	snmp_free_pdu(reply_pdu);

	return (0);
}
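
/*
 * Example (sketch only): walking all valid rows of a table column by
 * repeatedly asking for the next valid row number:
 *
 *	int	row = 0, nextrow;
 *
 *	while (snmp_get_nextrow(hdl, prefix, row, &nextrow, NULL) == 0) {
 *		... process row 'nextrow' ...
 *		row = nextrow;
 *	}
 */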

/*
 * Request ids for snmp messages to the agent are sequenced here.
 */
int
snmp_get_reqid(void)
{
	int	ret;

	(void) mutex_lock(&snmp_reqid_lock);

	ret = snmp_reqid++;

	(void) mutex_unlock(&snmp_reqid_lock);

	return (ret);
}

static int
lookup_int(char *prefix, int row, int *valp, int is_vol)
{
	int32_t	*val_arr;
	uint_t	nelem;
	int	now;
	int	elapsed;

	(void) mutex_lock(&mibcache_lock);

	if (row >= n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache[row] == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	/*
	 * If this is a volatile property, we should be searching
	 * for an integer-timestamp pair
	 */
	if (is_vol) {
		if (nvlist_lookup_int32_array(mibcache[row], prefix,
		    &val_arr, &nelem) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		if (nelem != 2 || val_arr[1] < 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		now = GET_SCALED_HRTIME();
		elapsed = now - val_arr[1];
		if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}

		*valp = (int)val_arr[0];
	} else {
		if (nvlist_lookup_int32(mibcache[row], prefix, valp) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
	}

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}

static int
lookup_str(char *prefix, int row, char **valp, int is_vol)
{
	char	**val_arr;
	uint_t	nelem;
	int	now;
	int	elapsed;

	(void) mutex_lock(&mibcache_lock);

	if (row >= n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache[row] == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	/*
	 * If this is a volatile property, we should be searching
	 * for a string-timestamp pair
	 */
	if (is_vol) {
		if (nvlist_lookup_string_array(mibcache[row], prefix,
		    &val_arr, &nelem) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		if (nelem != 2 || atoi(val_arr[1]) <= 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		now = GET_SCALED_HRTIME();
		elapsed = now - atoi(val_arr[1]);
		if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}

		*valp = val_arr[0];
	} else {
		if (nvlist_lookup_string(mibcache[row], prefix, valp) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
	}

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}

static int
lookup_bitstr(char *prefix, int row, uchar_t **valp, uint_t *nelem, int is_vol)
{
	(void) mutex_lock(&mibcache_lock);

	if (row >= n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache[row] == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	/*
	 * We don't support volatile bit string values yet. The nvlist
	 * functions don't support bitstring arrays like they do charstring
	 * arrays, so we would need to do things in a convoluted way,
	 * probably by attaching the timestamp as part of the byte array
	 * itself. However, there isn't yet enough need for volatile
	 * bitstrings to justify the effort.
	 */
	if (is_vol) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (nvlist_lookup_byte_array(mibcache[row], prefix, valp, nelem) != 0) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}

static int
search_oid_in_group(char *prefix, char *oidstrs, int n_oids)
{
	char	*p;
	int	i;

	p = oidstrs;
	for (i = 0; i < n_oids; i++) {
		if (strcmp(p, prefix) == 0)
			return (0);

		p += strlen(p) + 1;
	}

	return (-1);
}

static oidgroup_t *
locate_oid_group(struct picl_snmphdl *smd, char *prefix)
{
	oidgroup_t	*grp;

	if (smd == NULL)
		return (NULL);

	if (smd->group == NULL)
		return (NULL);

	for (grp = smd->group; grp; grp = grp->next) {
		if (search_oid_in_group(prefix, grp->oidstrs,
		    grp->n_oids) == 0) {
			return (grp);
		}
	}

	return (NULL);
}

static int
fetch_single_int(struct picl_snmphdl *smd, char *prefix, int row, int *ival,
    int *snmp_syserr)
{
	snmp_pdu_t	*reply_pdu;
	pdu_varlist_t	*vp;

	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
		return (-1);

	/*
	 * Note that we don't make any distinction between unsigned int
	 * value and signed int value at this point, since we provide
	 * only snmp_get_int() at the higher level. While it is possible
	 * to provide an entirely separate interface such as snmp_get_uint(),
	 * that's quite unnecessary, because we don't do any interpretation
	 * of the received value. Besides, the sizes of int and uint are
	 * the same and the sizes of all pointers are the same (so val.iptr
	 * would be the same as val.uiptr in pdu_varlist_t). If/when we
	 * violate any of these assumptions, it will be time to add
	 * snmp_get_uint().
	 */
	vp = reply_pdu->vars;
	if (vp == NULL || vp->val.iptr == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	*ival = *(vp->val.iptr);

	snmp_free_pdu(reply_pdu);

	return (0);
}

static int
fetch_single_str(struct picl_snmphdl *smd, char *prefix, int row, char **valp,
    int *snmp_syserr)
{
	snmp_pdu_t	*reply_pdu;
	pdu_varlist_t	*vp;

	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
		return (-1);

	vp = reply_pdu->vars;
	if (vp == NULL || vp->val.str == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	/*
	 * strdup() can fail; propagate that as an error rather than
	 * returning success with a NULL string.
	 */
	*valp = strdup((const char *)(vp->val.str));

	snmp_free_pdu(reply_pdu);

	return (*valp == NULL ? -1 : 0);
}

static int
fetch_single_bitstr(struct picl_snmphdl *smd, char *prefix, int row,
    uchar_t **valp, uint_t *nelem, int *snmp_syserr)
{
	snmp_pdu_t	*reply_pdu;
	pdu_varlist_t	*vp;

	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
		return (-1);

	vp = reply_pdu->vars;
	if (vp == NULL || vp->val.str == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	if ((*valp = (uchar_t *)calloc(vp->val_len, 1)) == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	*nelem = vp->val_len;
	(void) memcpy(*valp, (const void *)(vp->val.str),
	    (size_t)(vp->val_len));

	snmp_free_pdu(reply_pdu);

	return (0);
}

static snmp_pdu_t *
fetch_single(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;

	LOGGET(TAG_CMD_REQUEST, prefix, row);

	if ((pdu = snmp_create_pdu(SNMP_MSG_GET, 0, prefix, 1, row)) == NULL)
		return (NULL);

	LOGPDU(TAG_REQUEST_PDU, pdu);

	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);

	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);

	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);

	LOGPDU(TAG_RESPONSE_PDU, reply_pdu);

	snmp_free_pdu(pdu);

	return (reply_pdu);
}

static void
fetch_bulk(struct picl_snmphdl *smd, char *oidstrs, int n_oids,
    int row, int is_vol, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;
	int	max_reps;

	LOGBULK(TAG_CMD_REQUEST, n_oids, oidstrs, row);

	/*
	 * If we're fetching volatile properties using GETBULK, limit
	 * the request to a single row; passing max_reps=0 would make
	 * snmp_create_pdu() ask for SNMP_DEF_MAX_REPETITIONS rows.
	 */
	max_reps = is_vol ? 1 : 0;

	pdu = snmp_create_pdu(SNMP_MSG_GETBULK, max_reps, oidstrs, n_oids, row);
	if (pdu == NULL)
		return;

	LOGPDU(TAG_REQUEST_PDU, pdu);

	/*
	 * Make an ASN.1 encoded packet from the PDU information
	 */
	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);

	/*
	 * Send the request packet to the agent
	 */
	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	/*
	 * Receive response from the agent into the reply packet buffer
	 * in the request PDU
	 */
	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);

	/*
	 * Parse the reply, validate the response and create a
	 * reply-PDU out of the information. Populate the mibcache
	 * with the received values.
	 */
	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);
	if (reply_pdu) {
		LOGPDU(TAG_RESPONSE_PDU, reply_pdu);

		if (reply_pdu->errstat == SNMP_ERR_NOERROR) {
			if (is_vol) {
				/* Add a job to the cache refresh work queue */
				(void) refreshq_add_job(smd, oidstrs, n_oids,
				    row);
			}

			mibcache_populate(reply_pdu, is_vol);
		}

		snmp_free_pdu(reply_pdu);
	}

	snmp_free_pdu(pdu);
}

static snmp_pdu_t *
fetch_next(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;

	LOGNEXT(TAG_CMD_REQUEST, prefix, row);

	pdu = snmp_create_pdu(SNMP_MSG_GETNEXT, 0, prefix, 1, row);
	if (pdu == NULL)
		return (NULL);

	LOGPDU(TAG_REQUEST_PDU, pdu);

	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);

	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);

	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);

	LOGPDU(TAG_RESPONSE_PDU, reply_pdu);

	snmp_free_pdu(pdu);

	return (reply_pdu);
}

static int
snmp_send_request(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
{
	extern int	errno;
#ifdef USE_SOCKETS
	int	ret;
#endif

	if (smd->fd < 0)
		return (-1);

	if (pdu == NULL || pdu->req_pkt == NULL)
		return (-1);

#ifdef USE_SOCKETS
	ret = -1;
	while (ret < 0) {
		LOGIO(TAG_SENDTO, smd->fd, pdu->req_pkt, pdu->req_pktsz);

		ret = sendto(smd->fd, pdu->req_pkt, pdu->req_pktsz, 0,
		    (struct sockaddr *)&smd->agent_addr,
		    sizeof (struct sockaddr));
		if (ret < 0 && errno != EINTR) {
			return (-1);
		}
	}
#else
	LOGIO(TAG_WRITE, smd->fd, pdu->req_pkt, pdu->req_pktsz);

	if (write(smd->fd, pdu->req_pkt, pdu->req_pktsz) < 0) {
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}
#endif

#ifdef SNMP_DEBUG
	snmp_nsends++;
	snmp_sentbytes += pdu->req_pktsz;
#endif

	return (0);
}

static int
snmp_recv_reply(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
{
	struct dssnmp_info	snmp_info;
	size_t	pktsz;
	uchar_t	*pkt;
	extern int	errno;
#ifdef USE_SOCKETS
	struct sockaddr_in	from;
	socklen_t	fromlen;
	ssize_t	msgsz;
#endif

	if (smd->fd < 0 || pdu == NULL)
		return (-1);

#ifdef USE_SOCKETS
	if ((pkt = (uchar_t *)calloc(1, SNMP_MAX_RECV_PKTSZ)) == NULL)
		return (-1);

	fromlen = sizeof (struct sockaddr_in);

	LOGIO(TAG_RECVFROM, smd->fd, pkt, SNMP_MAX_RECV_PKTSZ);

	msgsz = recvfrom(smd->fd, pkt, SNMP_MAX_RECV_PKTSZ, 0,
	    (struct sockaddr *)&from, &fromlen);
	if (msgsz < 0 || msgsz >= SNMP_MAX_RECV_PKTSZ) {
		free(pkt);
		return (-1);
	}

	pktsz = (size_t)msgsz;
#else
	LOGIO(TAG_IOCTL, smd->fd, DSSNMP_GETINFO, &snmp_info);

	/*
	 * The ioctl will block until we have snmp data available
	 */
	if (ioctl(smd->fd, DSSNMP_GETINFO, &snmp_info) < 0) {
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}

	pktsz = snmp_info.size;
	if ((pkt = (uchar_t *)calloc(1, pktsz)) == NULL)
		return (-1);

	LOGIO(TAG_READ, smd->fd, pkt, pktsz);

	if (read(smd->fd, pkt, pktsz) < 0) {
		free(pkt);
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}
#endif

	pdu->reply_pkt = pkt;
	pdu->reply_pktsz = pktsz;

#ifdef SNMP_DEBUG
	snmp_nrecvs++;
	snmp_rcvdbytes += pktsz;
#endif

	return (0);
}

static int
mibcache_realloc(int hint)
{
	uint_t		count = (uint_t)hint;
	nvlist_t	**p;

	if (hint < 0)
		return (-1);

	(void) mutex_lock(&mibcache_lock);

	if (hint < n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (0);
	}

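	/*
	 * Round count up to the next multiple of the block size
	 * (1 << MIBCACHE_BLK_SHIFT), mirroring refreshq_realloc().
	 */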
	count = ((count >> MIBCACHE_BLK_SHIFT) + 1) << MIBCACHE_BLK_SHIFT;

	p = (nvlist_t **)calloc(count, sizeof (nvlist_t *));
	if (p == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache) {
		(void) memcpy((void *) p, (void *) mibcache,
		    n_mibcache_rows * sizeof (nvlist_t *));
		free((void *) mibcache);
	}

	mibcache = p;
	n_mibcache_rows = count;

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}


/*
 * Scan each variable in the returned PDU's bindings and populate
 * the cache appropriately
 */
static void
mibcache_populate(snmp_pdu_t *pdu, int is_vol)
{
	pdu_varlist_t	*vp;
	int		row, ret;
	char		*oidstr;
	int		tod;	/* in scaled hrtime */
	char		tod_str[MAX_INT_LEN];
	int		ival_arr[2];
	char		*sval_arr[2];

	/*
	 * If we're populating volatile properties, we also store a
	 * timestamp with each property value. When we lookup, we check the
	 * current time against this timestamp to determine if we need to
	 * refetch the value or not (refetch if it has been in for far too
	 * long).
	 */

	if (is_vol) {
		tod = GET_SCALED_HRTIME();

		tod_str[0] = 0;
		(void) snprintf(tod_str, MAX_INT_LEN, "%d", tod);

		ival_arr[1] = tod;
		sval_arr[1] = (char *)tod_str;
	}

	for (vp = pdu->vars; vp; vp = vp->nextvar) {
		if (vp->type != ASN_INTEGER && vp->type != ASN_OCTET_STR &&
		    vp->type != ASN_BIT_STR) {
			continue;
		}

		if (vp->name == NULL || vp->val.str == NULL)
			continue;

		row = (vp->name)[vp->name_len-1];

		(void) mutex_lock(&mibcache_lock);

		if (row >= n_mibcache_rows) {
			(void) mutex_unlock(&mibcache_lock);
			if (mibcache_realloc(row) < 0)
				continue;
			(void) mutex_lock(&mibcache_lock);
		}
		ret = 0;
		if (mibcache[row] == NULL)
			ret = nvlist_alloc(&mibcache[row], NV_UNIQUE_NAME, 0);

		(void) mutex_unlock(&mibcache_lock);

		if (ret != 0)
			continue;

		/*
		 * Convert the standard OID form into an oid string that
		 * we can use as the key to lookup. Since we only search
		 * by the prefix (mibcache is really an array of nvlist_t
		 * pointers), ignore the leaf subid.
		 */
		oidstr = oid_to_oidstr(vp->name, vp->name_len - 1);
		if (oidstr == NULL)
			continue;

		(void) mutex_lock(&mibcache_lock);

		if (vp->type == ASN_INTEGER) {
			if (is_vol) {
				ival_arr[0] = *(vp->val.iptr);
				(void) nvlist_add_int32_array(mibcache[row],
				    oidstr, ival_arr, 2);
			} else {
				(void) nvlist_add_int32(mibcache[row],
				    oidstr, *(vp->val.iptr));
			}

		} else if (vp->type == ASN_OCTET_STR) {
			if (is_vol) {
				sval_arr[0] = (char *)vp->val.str;
				(void) nvlist_add_string_array(mibcache[row],
				    oidstr, sval_arr, 2);
			} else {
				(void) nvlist_add_string(mibcache[row],
				    oidstr, (const char *)(vp->val.str));
			}
		} else if (vp->type == ASN_BIT_STR) {
			/*
			 * We don't yet support bit string objects that are
			 * volatile values.
			 */
			if (!is_vol) {
				(void) nvlist_add_byte_array(mibcache[row],
				    oidstr, (uchar_t *)(vp->val.str),
				    (uint_t)vp->val_len);
			}
		}
		(void) mutex_unlock(&mibcache_lock);

		free(oidstr);
	}
}

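/*
 * Convert the leading n_subids subidentifiers of objid into a
 * dotted-decimal string; e.g. { 1, 3, 6, 1, 2 } with n_subids == 4
 * yields "1.3.6.1" (callers typically pass name_len - 1 to drop the
 * row subid). Returns NULL on failure.
 */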
static char *
oid_to_oidstr(oid *objid, size_t n_subids)
{
	char	*oidstr;
	char	subid_str[MAX_INT_LEN];
	int	i, isize;
	size_t	oidstr_sz;

	/*
	 * ugly, but for now this will have to do.
	 */
	oidstr_sz = sizeof (subid_str) * n_subids;
	if ((oidstr = calloc(1, oidstr_sz)) == NULL)
		return (NULL);

	for (i = 0; i < n_subids; i++) {
		(void) memset(subid_str, 0, sizeof (subid_str));
		isize = snprintf(subid_str, sizeof (subid_str), "%d",
		    objid[i]);
		if (isize >= sizeof (subid_str)) {
			free(oidstr);
			return (NULL);
		}

		(void) strlcat(oidstr, subid_str, oidstr_sz);
		if (i < (n_subids - 1))
			(void) strlcat(oidstr, ".", oidstr_sz);
	}

	return (oidstr);
}

/*
 * Expand the refreshq to hold more cache refresh jobs. Caller must already
 * hold the refreshq_lock mutex. Every expansion of the refreshq will add
 * a full block of job slots (1 << REFRESHQ_BLK_SHIFT), rather than expanding
 * by one slot every time more space is needed.
 */
static int
refreshq_realloc(int hint)
{
	uint_t		count = (uint_t)hint;
	refreshq_job_t	*p;

	if (hint < 0)
		return (-1);

	if (hint < n_refreshq_slots) {
		return (0);
	}

	/*
	 * Round count up to the next multiple of the block size
	 * (1 << REFRESHQ_BLK_SHIFT)
	 */
	count = ((count >> REFRESHQ_BLK_SHIFT) + 1) << REFRESHQ_BLK_SHIFT;

	p = (refreshq_job_t *)calloc(count, sizeof (refreshq_job_t));
	if (p == NULL) {
		return (-1);
	}

	if (refreshq) {
		if (n_refreshq_jobs == 0) {
			/* Simple case, nothing to copy */
			refreshq_next_job = 0;
			refreshq_next_slot = 0;
		} else if (refreshq_next_slot > refreshq_next_job) {
			/* Simple case, single copy preserves everything */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    n_refreshq_jobs * sizeof (refreshq_job_t));
		} else {
			/*
			 * Complex case. The jobs in the refresh queue wrap
			 * around the end of the array in which they are
			 * stored. To preserve chronological order in the
			 * newly allocated array, we need to copy the jobs at
			 * the end of the old array to the beginning of the
			 * new one and place the jobs from the beginning of
			 * the old array after them.
			 */
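			/*
			 * For example, with n_refreshq_slots == 8,
			 * refreshq_next_job == 6 and n_refreshq_jobs == 4,
			 * old slots 6-7 land in new slots 0-1 and old
			 * slots 0-1 land in new slots 2-3.
			 */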
			uint_t tail_jobs, head_jobs;

			tail_jobs = n_refreshq_slots - refreshq_next_job;
			head_jobs = n_refreshq_jobs - tail_jobs;

			/* Copy the jobs from the end of the old array */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    tail_jobs * sizeof (refreshq_job_t));

			/* Copy the jobs from the beginning of the old array */
			(void) memcpy((void *) &(p[tail_jobs]),
			    (void *) &(refreshq[0]),
			    head_jobs * sizeof (refreshq_job_t));

			/* update the job and slot indices to match */
			refreshq_next_job = 0;
			refreshq_next_slot = n_refreshq_jobs;
		}
		free((void *) refreshq);
	} else {
		/* First initialization */
		refreshq_next_job = 0;
		refreshq_next_slot = 0;
		n_refreshq_jobs = 0;
	}

	refreshq = p;
	n_refreshq_slots = count;

	return (0);
}

/*
 * Add a new job to the refreshq. If there aren't any open slots, attempt to
 * expand the queue first. Return -1 if unable to add the job to the work
 * queue, or 0 if the job was added OR if an existing job with the same
 * parameters is already pending.
 */
static int
refreshq_add_job(struct picl_snmphdl *smd, char *oidstrs, int n_oids, int row)
{
	int	i;
	int	job;

	(void) mutex_lock(&refreshq_lock);

	/*
	 * Can't do anything without a queue. Either the client never
	 * initialized the refresh queue or the initial memory allocation
	 * failed.
	 */
	if (refreshq == NULL) {
		(void) mutex_unlock(&refreshq_lock);
		return (-1);
	}

	/*
	 * If there is already a job pending with the same parameters as the
	 * job we have been asked to add, we apparently let an entry expire
	 * and it is now being reloaded. Rather than add another job for the
	 * same entry, we skip adding the new job and let the existing job
	 * address it.
	 */
	for (i = 0, job = refreshq_next_job; i < n_refreshq_jobs; i++,
	    job = (job + 1) % n_refreshq_slots) {
		if ((refreshq[job].row == row) &&
		    (refreshq[job].n_oids == n_oids) &&
		    (refreshq[job].oidstrs == oidstrs)) {
			(void) mutex_unlock(&refreshq_lock);
			return (0);
		}
	}

	/*
	 * If the queue is full, we need to expand it
	 */
	if (n_refreshq_jobs == n_refreshq_slots) {
		if (refreshq_realloc(n_refreshq_slots + 1) < 0) {
			/*
			 * Can't expand the job queue, so we drop this job on
			 * the floor. No data is lost... we just allow some
			 * data in the mibcache to expire.
			 */
			(void) mutex_unlock(&refreshq_lock);
			return (-1);
		}
	}

	/*
	 * There is room in the queue, so add the new job. We are actually
	 * taking a timestamp for this job that is slightly earlier than when
	 * the mibcache entry will be updated, but since we're trying to update
	 * the mibcache entry before it expires anyway, the earlier timestamp
	 * here is acceptable.
	 */
	refreshq[refreshq_next_slot].smd = smd;
	refreshq[refreshq_next_slot].oidstrs = oidstrs;
	refreshq[refreshq_next_slot].n_oids = n_oids;
	refreshq[refreshq_next_slot].row = row;
	refreshq[refreshq_next_slot].last_fetch_time = GET_SCALED_HRTIME();

	/*
	 * Update queue management variables
	 */
	n_refreshq_jobs += 1;
	refreshq_next_slot = (refreshq_next_slot + 1) % n_refreshq_slots;

	(void) mutex_unlock(&refreshq_lock);

	return (0);
}

/*
 * Almost all of the refresh code remains dormant unless specifically
 * initialized by a client (the exception being that fetch_bulk() will still
 * call refreshq_add_job(), but the latter will return without doing anything).
 */
int
snmp_refresh_init(void)
{
	int	ret;

	(void) mutex_lock(&refreshq_lock);

	ret = refreshq_realloc(0);

	(void) mutex_unlock(&refreshq_lock);

	return (ret);
}

/*
 * If the client is going away, we don't want to keep doing refresh work, so
 * clean everything up.
 */
void
snmp_refresh_fini(void)
{
	(void) mutex_lock(&refreshq_lock);

	n_refreshq_jobs = 0;
	n_refreshq_slots = 0;
	refreshq_next_job = 0;
	refreshq_next_slot = 0;
	free(refreshq);
	refreshq = NULL;

	(void) mutex_unlock(&refreshq_lock);
}

/*
 * Return the number of seconds remaining before the mibcache entry associated
 * with the next job in the queue will expire. Note that this requires
 * reversing the scaling normally done on hrtime values. (The need for scaling
 * is purely internal, and should be hidden from clients.) If there are no jobs
 * in the queue, return -1. If the next job has already expired, return 0.
 */
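/*
 * For example, assuming HRTIME_SCALE == 10 and MAX_INCACHE_TIME == 30
 * (i.e. 300 seconds; both from snmplib.h), a job fetched 120 seconds ago
 * has elapsed == 12, so we report (30 - 12) * 10 == 180 seconds until
 * expiration.
 */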
int
snmp_refresh_get_next_expiration(void)
{
	int	ret;
	int	elapsed;

	(void) mutex_lock(&refreshq_lock);

	if (n_refreshq_jobs == 0) {
		ret = -1;
	} else {
		elapsed = GET_SCALED_HRTIME() -
		    refreshq[refreshq_next_job].last_fetch_time;

		if (elapsed >= MAX_INCACHE_TIME) {
			ret = 0;
		} else {
			ret = (MAX_INCACHE_TIME - elapsed) * HRTIME_SCALE;
		}
	}

	(void) mutex_unlock(&refreshq_lock);

	return (ret);
}

/*
 * Given the number of seconds the client wants to spend on each cycle of
 * processing jobs and then sleeping, return a suggestion for the number of
 * jobs the client should process, calculated by dividing the client's cycle
 * duration by MAX_INCACHE_TIME and multiplying the result by the total number
 * of jobs in the queue. (Note that the actual implementation of that
 * calculation is done in a different order to avoid losing fractional values
 * during integer arithmetic.)
 */
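/*
 * For example, with 60 queued jobs, a 60-second cycle scales to 6 internal
 * units (assuming HRTIME_SCALE == 10), suggesting 60 * 6 / 30 == 12 jobs
 * per cycle when MAX_INCACHE_TIME == 30.
 */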
int
snmp_refresh_get_cycle_hint(int secs)
{
	int	jobs;

	(void) mutex_lock(&refreshq_lock);

	/*
	 * First, we need to scale the client's cycle time to get it into the
	 * same units we use internally (i.e. tens of seconds). We round up, as
	 * it makes more sense for the client to process extra jobs than
	 * insufficient jobs. If the client's desired cycle time is greater
	 * than MAX_INCACHE_TIME, we just return the current total number of
	 * jobs.
	 */
	secs = (secs + HRTIME_SCALE - 1) / HRTIME_SCALE;

	jobs = (n_refreshq_jobs * secs) / MAX_INCACHE_TIME;
	if (jobs > n_refreshq_jobs) {
		jobs = n_refreshq_jobs;
	}

	(void) mutex_unlock(&refreshq_lock);

	return (jobs);
}

/*
 * Process the next job on the refresh queue by invoking fetch_bulk() with the
 * recorded parameters. Return -1 if no job was processed (e.g. because there
 * aren't any available), or 0 if a job was processed. We don't actually care
 * if fetch_bulk() fails, since we're just working on cache entry refreshing
 * and the worst case result of failing here is a longer delay getting that
 * data the next time it is requested.
 */
int
snmp_refresh_process_job(void)
{
	struct picl_snmphdl	*smd;
	char	*oidstrs;
	int	n_oids;
	int	row;
	int	err;

	(void) mutex_lock(&refreshq_lock);

	if (n_refreshq_jobs == 0) {
		(void) mutex_unlock(&refreshq_lock);

		return (-1);
	}

	smd = refreshq[refreshq_next_job].smd;
	oidstrs = refreshq[refreshq_next_job].oidstrs;
	n_oids = refreshq[refreshq_next_job].n_oids;
	row = refreshq[refreshq_next_job].row;

	refreshq_next_job = (refreshq_next_job + 1) % n_refreshq_slots;
	n_refreshq_jobs--;

	(void) mutex_unlock(&refreshq_lock);

	/*
	 * fetch_bulk() is going to come right back into the refresh code to
	 * add a new job for the entry we just loaded, which means we have to
	 * make the call without holding the refreshq_lock mutex.
	 */
	fetch_bulk(smd, oidstrs, n_oids, row, 1, &err);

	return (0);
}