/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/comm_page.h>
#include <sys/tsc.h>


/*
 * Determine whether querying the clock via the comm page is possible.
 */
int
__cp_can_gettime(comm_page_t *cp)
{
	switch (cp->cp_tsc_type) {
	case TSC_TSCP:
	case TSC_RDTSC_LFENCE:
	case TSC_RDTSC_CPUID:
		return (1);
	default:
		break;
	}
	return (0);
}

#ifdef __amd64

/*
 * The functions used for calculating time (both monotonic and wall-clock) are
 * implemented in assembly on amd64.  This is primarily for stack conservation.
 */

#else /* i386 below */

/*
 * ASM-defined functions.
 */
extern hrtime_t __cp_tsc_read(comm_page_t *);
extern hrtime_t __cp_gethrtime_fasttrap(void);

/*
 * These are cloned from TSC and time related code in the kernel.  They should
 * be kept in sync if the source values change.
 */
#define	NSEC_SHIFT	5
#define	ADJ_SHIFT	4
#define	NANOSEC		1000000000LL

#define	TSC_CONVERT_AND_ADD(tsc, hrt, scale) do {		\
	uint32_t *_l = (uint32_t *)&(tsc);			\
	uint64_t sc = (uint32_t)(scale);			\
	(hrt) += (uint64_t)(_l[1] * sc) << NSEC_SHIFT;		\
	(hrt) += (uint64_t)(_l[0] * sc) >> (32 - NSEC_SHIFT);	\
} while (0)

/*
 * Userspace version of tsc_gethrtime.
 * See: uts/i86pc/os/timestamp.c
 */
hrtime_t
__cp_gethrtime(comm_page_t *cp)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt, tsc_last;

	/*
	 * Several precautions must be taken when collecting the data
	 * necessary to perform an accurate gethrtime calculation.
	 *
	 * While much of the TSC state stored in the comm page is unchanging
	 * after boot, portions of it are periodically updated during OS
	 * ticks.  Changes to hres_lock during the course of the copy
	 * indicate a potentially inconsistent snapshot, necessitating a
	 * loop.
	 *
	 * Even more complicated is the handling for TSCs which require sync
	 * offsets between different CPUs.  Since userspace lacks the luxury
	 * of disabling interrupts, a validation loop checking for CPU
	 * migrations is used.  Pathological scheduling could, in theory,
	 * "outwit" this check.  Such a possibility is considered an
	 * acceptable risk.
	 */
	do {
		old_hres_lock = cp->cp_hres_lock;
		tsc_last = cp->cp_tsc_last;
		hrt = cp->cp_tsc_hrtime_base;
		tsc = __cp_tsc_read(cp);

		/*
		 * A TSC reading of 0 indicates the special case of an error
		 * bail-out.  Rely on the fasttrap to supply an hrtime value.
		 */
		if (tsc == 0) {
			return (__cp_gethrtime_fasttrap());
		}
	} while ((old_hres_lock & ~1) != cp->cp_hres_lock);

	if (tsc >= tsc_last) {
		tsc -= tsc_last;
	} else if (tsc >= tsc_last - (2 * cp->cp_tsc_max_delta)) {
		tsc = 0;
	} else if (tsc > cp->cp_tsc_resume_cap) {
		tsc = cp->cp_tsc_resume_cap;
	}
	TSC_CONVERT_AND_ADD(tsc, hrt, cp->cp_nsec_scale);

	return (hrt);
}
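
/*
 * A note on TSC_CONVERT_AND_ADD above (illustrative only; the authoritative
 * derivation lives with the kernel TSC code in uts/i86pc/os/timestamp.c):
 * cp_nsec_scale is a fixed-point factor sized so that, roughly,
 *
 *	nsec = (tsc_delta * cp_nsec_scale) >> (32 - NSEC_SHIFT)
 *
 * The macro splits the 64-bit tsc_delta into 32-bit halves so that each
 * multiply against the 32-bit scale stays within 64 bits:
 *
 *	(hi * scale) << NSEC_SHIFT == ((hi << 32) * scale) >> (32 - NSEC_SHIFT)
 *	(lo * scale) >> (32 - NSEC_SHIFT)
 *
 * and accumulates both partial results into hrt.
 */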

/*
 * Userspace version of pc_gethrestime.
 * See: uts/i86pc/os/machdep.c
 */
int
__cp_clock_gettime_realtime(comm_page_t *cp, timespec_t *tsp)
{
	int lock_prev, nslt;
	timespec_t now;
	int64_t hres_adj;

loop:
	lock_prev = cp->cp_hres_lock;
	now.tv_sec = cp->cp_hrestime[0];
	now.tv_nsec = cp->cp_hrestime[1];
	nslt = (int)(__cp_gethrtime(cp) - cp->cp_hres_last_tick);
	hres_adj = cp->cp_hrestime_adj;
	if (nslt < 0) {
		/*
		 * A tick came between sampling hrtime and hres_last_tick;
		 * try again.
		 */
		goto loop;
	}
	now.tv_nsec += nslt;

	/*
	 * Apply hres_adj skew, if needed.
	 */
	if (hres_adj > 0) {
		nslt = (nslt >> ADJ_SHIFT);
		if (nslt > hres_adj)
			nslt = (int)hres_adj;
		now.tv_nsec += nslt;
	} else if (hres_adj < 0) {
		nslt = -(nslt >> ADJ_SHIFT);
		if (nslt < hres_adj)
			nslt = (int)hres_adj;
		now.tv_nsec += nslt;
	}

	/*
	 * Rope in tv_nsec from any excessive adjustments.
	 */
	while ((unsigned long)now.tv_nsec >= NANOSEC) {
		now.tv_nsec -= NANOSEC;
		now.tv_sec++;
	}

	if ((cp->cp_hres_lock & ~1) != lock_prev)
		goto loop;

	*tsp = now;
	return (0);
}

/*
 * The __cp_clock_gettime_monotonic function expects that hrt2ts be present
 * when the code is finally linked.
 * (The amd64 version has no such requirement.)
 */
extern void hrt2ts(hrtime_t, timespec_t *);

int
__cp_clock_gettime_monotonic(comm_page_t *cp, timespec_t *tsp)
{
	hrtime_t hrt;

	hrt = __cp_gethrtime(cp);
	hrt2ts(hrt, tsp);
	return (0);
}

#endif /* __amd64 */
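
/*
 * Illustrative usage sketch (hypothetical; not part of this file): a consumer
 * such as libc's clock_gettime fast path would be expected to gate on
 * __cp_can_gettime() for the mapped comm page before calling in here:
 *
 *	comm_page_t *cp = ...;		(the mapped comm page)
 *	timespec_t ts;
 *
 *	if (cp != NULL && __cp_can_gettime(cp) != 0)
 *		(void) __cp_clock_gettime_monotonic(cp, &ts);
 *	else
 *		(fall back to the clock_gettime system call)
 */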