1 #include "jemalloc/internal/jemalloc_preamble.h" 2 #include "jemalloc/internal/sz.h" 3 4 JEMALLOC_ALIGNED(CACHELINE) 5 size_t sz_pind2sz_tab[SC_NPSIZES+1]; 6 7 static void 8 sz_boot_pind2sz_tab(const sc_data_t *sc_data) { 9 int pind = 0; 10 for (unsigned i = 0; i < SC_NSIZES; i++) { 11 const sc_t *sc = &sc_data->sc[i]; 12 if (sc->psz) { 13 sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base) 14 + (ZU(sc->ndelta) << sc->lg_delta); 15 pind++; 16 } 17 } 18 for (int i = pind; i <= (int)SC_NPSIZES; i++) { 19 sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE; 20 } 21 } 22 23 JEMALLOC_ALIGNED(CACHELINE) 24 size_t sz_index2size_tab[SC_NSIZES]; 25 26 static void 27 sz_boot_index2size_tab(const sc_data_t *sc_data) { 28 for (unsigned i = 0; i < SC_NSIZES; i++) { 29 const sc_t *sc = &sc_data->sc[i]; 30 sz_index2size_tab[i] = (ZU(1) << sc->lg_base) 31 + (ZU(sc->ndelta) << (sc->lg_delta)); 32 } 33 } 34 35 /* 36 * To keep this table small, we divide sizes by the tiny min size, which gives 37 * the smallest interval for which the result can change. 38 */ 39 JEMALLOC_ALIGNED(CACHELINE) 40 uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1]; 41 42 static void 43 sz_boot_size2index_tab(const sc_data_t *sc_data) { 44 size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1; 45 size_t dst_ind = 0; 46 for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max; 47 sc_ind++) { 48 const sc_t *sc = &sc_data->sc[sc_ind]; 49 size_t sz = (ZU(1) << sc->lg_base) 50 + (ZU(sc->ndelta) << sc->lg_delta); 51 size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1) 52 >> SC_LG_TINY_MIN); 53 for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) { 54 sz_size2index_tab[dst_ind] = sc_ind; 55 } 56 } 57 } 58 59 void 60 sz_boot(const sc_data_t *sc_data) { 61 sz_boot_pind2sz_tab(sc_data); 62 sz_boot_index2size_tab(sc_data); 63 sz_boot_size2index_tab(sc_data); 64 } 65