/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mc.h>
#include <sys/nvpair.h>
#include <sys/fm/protocol.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/mc_intel.h>
#include "dimm_addr.h"
#include "nb_log.h"
#include "rank.h"
#include "dimm_phys.h"
#include "nb5000.h"

struct dimm_geometry **dimm_geometry;
struct rank_base *rank_base;

/*
 * Reconstruct the physical address of a memory cell from its
 * branch/rank/bank/row (ras)/column (cas) coordinates, using the
 * geometry recorded for the rank.  Returns -1 if no geometry is known.
 */
uint64_t
dimm_getphys(int branch, int rank, int bank, int ras, int cas)
{
	uint8_t i;
	int num_ranks_per_branch;
	uint64_t m;
	uint64_t pa;
	struct rank_base *rp;
	struct rank_geometry *rgp;

	/* max number of ranks per branch */
	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
	    NB_5100_RANKS_PER_CHANNEL :
	    nb_dimms_per_channel * nb_channels_per_branch;
	ASSERT(rank < num_ranks_per_branch);
	rp = &rank_base[(branch * num_ranks_per_branch) + rank];
	rgp = (struct rank_geometry *)rp->rank_geometry;
	if (rgp == NULL)
		return (-1LL);
	pa = rp->base;

	/*
	 * For each bank, column and row bit that is set, add in the
	 * physical-address bit it maps to (0xff marks an unused entry).
	 * Shift as 64-bit: a geometry bit position may exceed bit 31.
	 */
	for (i = 0, m = 1; bank; i++, m <<= 1) {
		if ((bank & m) != 0 && rgp->bank[i] != 0xff) {
			pa += 1ULL << rgp->bank[i];
			bank &= ~m;
		}
	}
	for (i = 0, m = 1; cas; i++, m <<= 1) {
		if ((cas & m) != 0 && rgp->col[i] != 0xff) {
			pa += 1ULL << rgp->col[i];
			cas &= ~m;
		}
	}
	for (i = 0, m = 1; ras; i++, m <<= 1) {
		if ((ras & m) != 0 && rgp->row[i] != 0xff) {
			pa += 1ULL << rgp->row[i];
			ras &= ~m;
		}
	}
	/* fold in the branch/channel interleave select bits */
	if (rp->interleave > 1) {
		i = 0;
		if (rp->branch_interleave) {
			if (branch) {
				pa += 1ULL << rgp->interleave[i];
			}
			i++;
		}
		if ((rp->way & 1) != 0)
			pa += 1ULL << rgp->interleave[i];
		i++;
		if ((rp->way & 2) != 0)
			pa += 1ULL << rgp->interleave[i];
	}
	/* skip over the memory hole, if any */
	if (rp->hole && pa >= rp->hole)
		pa += rp->hole_size;
	return (pa);
}
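/*
 * Compute the page-relative offset encoded by the given
 * rank/bank/row/column coordinates.  If no DIMM geometry is recorded,
 * fall back to a raw TCODE-encoded offset; if the offset fails to
 * reproduce the physical address computed above, return -1.
 */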
uint64_t
dimm_getoffset(int branch, int rank, int bank, int ras, int cas)
{
	uint8_t i;
	int num_ranks_per_branch;
	uint64_t m;
	uint64_t offset;
	struct dimm_geometry *dgp;
	struct rank_geometry *rgp;
	struct rank_base *rp;
	uint64_t pa;
	uint64_t cal_pa;

	/* max number of ranks per branch */
	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
	    NB_5100_RANKS_PER_CHANNEL :
	    nb_dimms_per_channel * nb_channels_per_branch;
	ASSERT(rank < num_ranks_per_branch);
	rp = &rank_base[(branch * num_ranks_per_branch) + rank];
	dgp = dimm_geometry[(branch * nb_dimms_per_channel) +
	    nb_rank2dimm(branch, rank)];
	if (dgp == NULL)
		return (TCODE_OFFSET(rank, bank, ras, cas));
	rgp = (struct rank_geometry *)&dgp->rank_geometry[0];
	offset = 0;
	pa = dimm_getphys(branch, rank, bank, ras, cas) & PAGEMASK;

	for (i = 0, m = 1; bank; i++, m <<= 1) {
		if ((bank & m) != 0 && rgp->bank[i] != 0xff) {
			offset += 1ULL << rgp->bank[i];
			bank &= ~m;
		}
	}
	for (i = 0, m = 1; cas; i++, m <<= 1) {
		if ((cas & m) != 0 && rgp->col[i] != 0xff) {
			offset += 1ULL << rgp->col[i];
			cas &= ~m;
		}
	}
	for (i = 0, m = 1; ras; i++, m <<= 1) {
		if ((ras & m) != 0 && rgp->row[i] != 0xff) {
			offset += 1ULL << rgp->row[i];
			ras &= ~m;
		}
	}
	/*
	 * Sanity check: rebuild the physical address from the offset and
	 * make sure it matches the one derived from the geometry above.
	 */
	cal_pa = rp->base + (offset * rp->interleave);
	if (rp->hole && cal_pa >= rp->hole)
		cal_pa += rp->hole_size;
	cal_pa &= PAGEMASK;

	if (pa != cal_pa) {
		return (-1LL);
	}
	return (offset & PAGEMASK);
}

/*
 * Extract the board/memory-controller/channel/dimm/rank numbers and the
 * error offset from an hc-scheme FMRI into an mc_unum_t.  Returns 1 on
 * success, 0 if a required nvlist member is missing or malformed.
 */
static int
fmri2unum(nvlist_t *nvl, mc_unum_t *unump)
{
	int i;
	uint64_t offset;
	nvlist_t **hcl, *hcsp;
	uint_t npr;

	if (nvlist_lookup_nvlist(nvl, FM_FMRI_HC_SPECIFIC, &hcsp) != 0 ||
	    (nvlist_lookup_uint64(hcsp, "asru-" FM_FMRI_HC_SPECIFIC_OFFSET,
	    &offset) != 0 && nvlist_lookup_uint64(hcsp,
	    FM_FMRI_HC_SPECIFIC_OFFSET, &offset) != 0) ||
	    nvlist_lookup_nvlist_array(nvl, FM_FMRI_HC_LIST, &hcl, &npr) != 0)
		return (0);

	bzero(unump, sizeof (mc_unum_t));
	for (i = 0; i < MC_UNUM_NDIMM; i++)
		unump->unum_dimms[i] = MC_INVALNUM;

	for (i = 0; i < npr; i++) {
		char *hcnm, *hcid;
		long v;

		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
		    &hcnm) != 0 ||
		    nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0 ||
		    ddi_strtol(hcid, NULL, 0, &v) != 0)
			return (0);

		if (strcmp(hcnm, "motherboard") == 0)
			unump->unum_board = (int)v;
		else if (strcmp(hcnm, "memory-controller") == 0)
			unump->unum_mc = (int)v;
		else if (strcmp(hcnm, "dram-channel") == 0)
			unump->unum_cs = (int)v;
		else if (strcmp(hcnm, "dimm") == 0)
			unump->unum_dimms[0] = (int)v;
		else if (strcmp(hcnm, "rank") == 0)
			unump->unum_rank = (int)v;
	}

	unump->unum_offset = offset;

	return (1);
}
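/*
 * cmi_mc_ops entry point: translate a physical address into a unum by
 * searching the rank table for the rank whose [base, limit) range
 * contains the address.
 */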
/*ARGSUSED*/
static cmi_errno_t
inb_patounum(void *arg, uint64_t pa, uint8_t valid_hi, uint8_t valid_lo,
    uint32_t synd, int syndtype, mc_unum_t *unump)
{
	struct rank_base *rp;
	int i;
	int last;
	uint64_t offset;
	cmi_errno_t rt = CMIERR_UNKNOWN;

	last = nb_dimms_per_channel * nb_number_memory_controllers;
	for (i = 0; i < last; i++) {
		rp = &rank_base[i];
		if (rp && pa >= rp->base && pa < rp->limit)
			break;
	}
	if (i < last) {
		offset = pa - rp->base;
		if (offset > rp->hole)
			offset -= rp->hole_size;
		unump->unum_offset = offset / rp->interleave;
		unump->unum_mc = i / nb_dimms_per_channel;
		unump->unum_cs = 0;
		unump->unum_rank = i % nb_dimms_per_channel;
		rt = CMI_SUCCESS;
	}
	return (rt);
}

/*
 * cmi_mc_ops entry point: translate a unum (or the FMRI it was derived
 * from) back into a physical address.  Row/bank/column encoded offsets
 * go through dimm_getphys(); plain offsets are rebuilt from the rank
 * base and interleave factor.
 */
/*ARGSUSED*/
static cmi_errno_t
inb_unumtopa(void *arg, mc_unum_t *unump, nvlist_t *nvl, uint64_t *pap)
{
	int num_ranks_per_branch;
	mc_unum_t unum;
	uint64_t pa;
	struct rank_base *rp;

	if (unump == NULL) {
		if (!fmri2unum(nvl, &unum))
			return (CMI_SUCCESS);
		unump = &unum;
	}
	if (unump->unum_offset & OFFSET_ROW_BANK_COL) {
		pa = dimm_getphys(unump->unum_mc,
		    TCODE_OFFSET_RANK(unump->unum_offset),
		    TCODE_OFFSET_BANK(unump->unum_offset),
		    TCODE_OFFSET_RAS(unump->unum_offset),
		    TCODE_OFFSET_CAS(unump->unum_offset));
		if (pa == -1LL)
			return (CMIERR_MC_NOADDR);
		*pap = pa;
		return (CMI_SUCCESS);
	}

	/* max number of ranks per branch */
	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
	    NB_5100_RANKS_PER_CHANNEL :
	    nb_dimms_per_channel * nb_channels_per_branch;
	rp = &rank_base[(unump->unum_mc * num_ranks_per_branch) +
	    unump->unum_rank];
	pa = rp->base + (unump->unum_offset * rp->interleave);

	if (rp->hole && pa >= rp->hole)
		pa += rp->hole_size;
	*pap = pa;
	return (CMI_SUCCESS);
}

/*
 * Allocate the DIMM geometry and rank tables, sized for the maximum
 * number of memory controllers, DIMMs and ranks on this chipset.
 */
void
dimm_init()
{
	int num_ranks_per_branch;

	dimm_geometry = kmem_zalloc(sizeof (void *) *
	    nb_number_memory_controllers * nb_dimms_per_channel, KM_SLEEP);

	/* max number of ranks per branch */
	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
	    NB_5100_RANKS_PER_CHANNEL :
	    nb_dimms_per_channel * nb_channels_per_branch;

	rank_base = kmem_zalloc(sizeof (struct rank_base) *
	    nb_number_memory_controllers * num_ranks_per_branch, KM_SLEEP);
}

void
dimm_fini()
{
	int num_ranks_per_branch;

	kmem_free(dimm_geometry, sizeof (void *) *
	    nb_number_memory_controllers * nb_dimms_per_channel);
	dimm_geometry = NULL;

	/* max number of ranks per branch */
	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
	    NB_5100_RANKS_PER_CHANNEL :
	    nb_dimms_per_channel * nb_channels_per_branch;

	kmem_free(rank_base, sizeof (struct rank_base) *
	    nb_number_memory_controllers * num_ranks_per_branch);
	rank_base = NULL;
}

/*
 * Record the geometry of a detected DIMM by matching its bank, width,
 * row and column parameters against the known geometry table
 * (dimm_data).  An unmatched DIMM leaves its slot NULL, which disables
 * address translation for its ranks.
 */
void
dimm_add_geometry(int branch, int dimm, int nbanks, int width, int ncolumn,
    int nrow)
{
	int i;

	for (i = 0; i < dimm_types; i++) {
		if (dimm_data[i].row_nbits == nrow &&
		    dimm_data[i].col_nbits == ncolumn &&
		    dimm_data[i].width == width &&
		    (1 << dimm_data[i].bank_nbits) == nbanks) {
			dimm_geometry[(branch * nb_dimms_per_channel) + dimm] =
			    &dimm_data[i];
			break;
		}
	}
}
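/*
 * Record the address-translation parameters for one rank: base and
 * limit, memory-hole location and size, interleave way and factor, and
 * a pointer to the rank geometry selected by the interleave factor.
 */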
void
dimm_add_rank(int branch, int rank, int branch_interleave, int way,
    uint64_t base, uint32_t hole, uint32_t hole_size, int interleave,
    uint64_t limit)
{
	struct dimm_geometry *dimm;
	struct rank_base *rp;
	int interleave_nbits;
	int num_ranks_per_branch;

	dimm = dimm_geometry[(branch * nb_dimms_per_channel) +
	    nb_rank2dimm(branch, rank)];

	/* max number of ranks per branch */
	num_ranks_per_branch = (nb_chipset == INTEL_NB_5100) ?
	    NB_5100_RANKS_PER_CHANNEL :
	    nb_dimms_per_channel * nb_channels_per_branch;
	rp = &rank_base[(branch * num_ranks_per_branch) + rank];
	/* log2 of the interleave factor (1-, 2-, 4- or 8-way) */
	if (interleave == 1)
		interleave_nbits = 0;
	else if (interleave == 2)
		interleave_nbits = 1;
	else if (interleave == 4)
		interleave_nbits = 2;
	else
		interleave_nbits = 3;
	rp->branch_interleave = branch_interleave;
	rp->way = way;
	rp->base = base;
	rp->hole = hole;
	rp->hole_size = hole_size;
	rp->interleave = interleave;
	rp->limit = limit;
	if (dimm)
		rp->rank_geometry = &dimm->rank_geometry[interleave_nbits];
	else
		rp->rank_geometry = NULL;
}

static const cmi_mc_ops_t inb_mc_ops = {
	inb_patounum,
	inb_unumtopa,
	nb_error_trap	/* cmi_mc_logout */
};

/*
 * Register the memory-controller ops vector with each CPU's cmi
 * handle; called from a cmi handle walk.
 */
/*ARGSUSED*/
int
inb_mc_register(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
{
	cmi_mc_register(hdl, &inb_mc_ops, NULL);
	return (CMI_HDL_WALK_NEXT);
}