xref: /illumos-gate/usr/src/uts/intel/io/intel_nb5000/dimm_addr.c (revision b31b5de1357c915fe7dab4d9646d9d84f9fe69bc)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mc.h>
#include <sys/nvpair.h>
#include <sys/fm/protocol.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/mc_intel.h>
#include "dimm_addr.h"
#include "nb_log.h"
#include "rank.h"
#include "dimm_phys.h"
#include "nb5000.h"

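/*
 * Per-DIMM geometry table and per-rank base/limit/interleave table,
 * populated by dimm_add_geometry() and dimm_add_rank() below.
 */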
struct dimm_geometry **dimm_geometry;
struct rank_base *rank_base;

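/*
 * Translate a branch/rank/bank/row/column tuple into a physical address.
 * Each DRAM address bit that is set is mapped through the rank geometry
 * (bank/col/row tables) onto its physical address bit and added to the
 * rank base; interleave selection bits and the memory hole are then
 * folded in.  Returns -1 if no geometry is known for the rank.
 */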
uint64_t
dimm_getphys(int branch, int rank, int bank, int ras, int cas)
{
	uint8_t i;
	uint64_t m;
	uint64_t pa;
	struct rank_base *rp;
	struct rank_geometry *rgp;

	ASSERT(rank < nb_dimms_per_channel * 2);
	rp = &rank_base[(branch * nb_dimms_per_channel * 2) + rank];
	rgp = (struct rank_geometry *)rp->rank_geometry;
	if (rgp == NULL)
		return (-1LL);
	pa = rp->base;

	for (i = 0, m = 1; bank; i++, m <<= 1) {
		if ((bank & m) != 0 && rgp->bank[i] != 0xff) {
			pa += 1 << rgp->bank[i];
			bank &= ~m;
		}
	}
	for (i = 0, m = 1; cas; i++, m <<= 1) {
		if ((cas & m) != 0 && rgp->col[i] != 0xff) {
			pa += 1 << rgp->col[i];
			cas &= ~m;
		}
	}
	for (i = 0, m = 1; ras; i++, m <<= 1) {
		if ((ras & m) != 0 && rgp->row[i] != 0xff) {
			pa += 1 << rgp->row[i];
			ras &= ~m;
		}
	}
	if (rp->interleave > 1) {
		i = 0;
		if (rp->branch_interleave) {
			if (branch) {
				pa += 1 << rgp->interleave[i];
			}
			i++;
		}
		if ((rp->way & 1) != 0)
			pa += 1 << rgp->interleave[i];
		i++;
		if ((rp->way & 2) != 0)
			pa += 1 << rgp->interleave[i];
	}
	if (rp->hole && pa >= rp->hole)
		pa += rp->hole_size;
	return (pa);
}

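/*
 * Translate a branch/rank/bank/row/column tuple into a rank-relative
 * offset.  If no DIMM geometry has been recorded the offset is returned
 * in packed TCODE form.  Otherwise the offset is rebuilt from the
 * geometry tables and cross-checked against dimm_getphys(); -1 is
 * returned on a mismatch, the page-aligned offset otherwise.
 */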
uint64_t
dimm_getoffset(int branch, int rank, int bank, int ras, int cas)
{
	uint8_t i;
	uint64_t m;
	uint64_t offset;
	struct dimm_geometry *dgp;
	struct rank_geometry *rgp;
	struct rank_base *rp;
	uint64_t pa;
	uint64_t cal_pa;

	ASSERT(rank < nb_dimms_per_channel * 2);
	rp = &rank_base[(branch * nb_dimms_per_channel * 2) + rank];
	dgp = dimm_geometry[(branch * nb_dimms_per_channel) + rank/2];
	if (dgp == NULL)
		return (TCODE_OFFSET(rank, bank, ras, cas));
	rgp = (struct rank_geometry *)&dgp->rank_geometry[0];
	offset = 0;
	pa = dimm_getphys(branch, rank, bank, ras, cas) & PAGEMASK;

	for (i = 0, m = 1; bank; i++, m <<= 1) {
		if ((bank & m) != 0 && rgp->bank[i] != 0xff) {
			offset += 1 << rgp->bank[i];
			bank &= ~m;
		}
	}
	for (i = 0, m = 1; cas; i++, m <<= 1) {
		if ((cas & m) != 0 && rgp->col[i] != 0xff) {
			offset += 1 << rgp->col[i];
			cas &= ~m;
		}
	}
	for (i = 0, m = 1; ras; i++, m <<= 1) {
		if ((ras & m) != 0 && rgp->row[i] != 0xff) {
			offset += 1 << rgp->row[i];
			ras &= ~m;
		}
	}
	cal_pa = rp->base + (offset * rp->interleave);
	if (rp->hole && cal_pa >= rp->hole)
		cal_pa += rp->hole_size;
	cal_pa &= PAGEMASK;

	if (pa != cal_pa) {
		return (-1LL);
	}
	return (offset & PAGEMASK);
}

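/*
 * Pull the motherboard/memory-controller/dram-channel/dimm/rank members
 * and the hc-specific offset out of an hc-scheme FMRI and fill in an
 * mc_unum_t.  Returns 1 on success, 0 if a required member is missing.
 */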
static int
fmri2unum(nvlist_t *nvl, mc_unum_t *unump)
{
	int i;
	uint64_t offset;
	nvlist_t **hcl, *hcsp;
	uint_t npr;

	if (nvlist_lookup_nvlist(nvl, FM_FMRI_HC_SPECIFIC, &hcsp) != 0 ||
	    (nvlist_lookup_uint64(hcsp, "asru-" FM_FMRI_HC_SPECIFIC_OFFSET,
	    &offset) != 0 && nvlist_lookup_uint64(hcsp,
	    FM_FMRI_HC_SPECIFIC_OFFSET, &offset) != 0) ||
	    nvlist_lookup_nvlist_array(nvl, FM_FMRI_HC_LIST, &hcl, &npr) != 0)
		return (0);

	bzero(unump, sizeof (mc_unum_t));
	for (i = 0; i < MC_UNUM_NDIMM; i++)
		unump->unum_dimms[i] = MC_INVALNUM;

	for (i = 0; i < npr; i++) {
		char *hcnm, *hcid;
		long v;

		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME, &hcnm) != 0 ||
		    nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0 ||
		    ddi_strtol(hcid, NULL, 0, &v) != 0)
			return (0);

		if (strcmp(hcnm, "motherboard") == 0)
			unump->unum_board = (int)v;
		else if (strcmp(hcnm, "memory-controller") == 0)
			unump->unum_mc = (int)v;
		else if (strcmp(hcnm, "dram-channel") == 0)
			unump->unum_cs = (int)v;
		else if (strcmp(hcnm, "dimm") == 0)
			unump->unum_dimms[0] = (int)v;
		else if (strcmp(hcnm, "rank") == 0)
			unump->unum_rank = (int)v;
	}

	unump->unum_offset = offset;

	return (1);
}

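/*
 * cmi_mc_ops physical-address-to-unum entry point: scan the rank table
 * for the rank whose [base, limit) range contains pa and express pa as
 * a memory-controller/rank/offset tuple.
 */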
/*ARGSUSED*/
static cmi_errno_t
inb_patounum(void *arg, uint64_t pa, uint8_t valid_hi, uint8_t valid_lo,
    uint32_t synd, int syndtype, mc_unum_t *unump)
{
	struct rank_base *rp;
	int i;
	int last;
	uint64_t offset;
	cmi_errno_t rt = CMIERR_UNKNOWN;

	last = nb_dimms_per_channel * nb_number_memory_controllers;
	for (i = 0; i < last; i++) {
		rp = &rank_base[i];
		if (rp && pa >= rp->base && pa < rp->limit)
			break;
	}
	if (i < last) {
		offset = pa - rp->base;
		if (offset > rp->hole)
			offset -= rp->hole_size;
		unump->unum_offset = offset / rp->interleave;
		unump->unum_mc = i / nb_dimms_per_channel;
		unump->unum_cs = 0;
		unump->unum_rank = i % nb_dimms_per_channel;
		rt = CMI_SUCCESS;
	}
	return (rt);
}

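/*
 * cmi_mc_ops unum-to-physical-address entry point.  The unum may be
 * passed directly or as an hc-scheme FMRI (unump == NULL).  Offsets in
 * row/bank/column TCODE form are resolved through dimm_getphys();
 * plain offsets are scaled by the rank interleave and added to the
 * rank base, accounting for the memory hole.
 */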
/*ARGSUSED*/
static cmi_errno_t
inb_unumtopa(void *arg, mc_unum_t *unump, nvlist_t *nvl, uint64_t *pap)
{
	mc_unum_t unum;
	uint64_t pa;
	struct rank_base *rp;

	if (unump == NULL) {
		if (!fmri2unum(nvl, &unum))
			return (CMI_SUCCESS);
		unump = &unum;
	}
	if (unump->unum_offset & OFFSET_ROW_BANK_COL) {
		pa = dimm_getphys(unump->unum_mc,
		    TCODE_OFFSET_RANK(unump->unum_offset),
		    TCODE_OFFSET_BANK(unump->unum_offset),
		    TCODE_OFFSET_RAS(unump->unum_offset),
		    TCODE_OFFSET_CAS(unump->unum_offset));
		if (pa == -1LL)
			return (CMIERR_MC_NOADDR);
		*pap = pa;
		return (CMI_SUCCESS);
	}
	rp = &rank_base[(unump->unum_mc * nb_dimms_per_channel * 2) +
	    unump->unum_rank];
	pa = rp->base + (unump->unum_offset * rp->interleave);

	if (rp->hole && pa >= rp->hole)
		pa += rp->hole_size;
	*pap = pa;
	return (CMI_SUCCESS);
}

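/*
 * Allocate (dimm_init) and free (dimm_fini) the dimm_geometry and
 * rank_base tables, sized by the number of memory controllers and the
 * number of DIMMs per channel.
 */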
void
dimm_init()
{
	dimm_geometry = kmem_zalloc(sizeof (void *) *
	    nb_number_memory_controllers * nb_dimms_per_channel, KM_SLEEP);
	rank_base = kmem_zalloc(sizeof (struct rank_base) *
	    nb_number_memory_controllers * nb_dimms_per_channel * 2, KM_SLEEP);
}

void
dimm_fini()
{
	kmem_free(dimm_geometry, sizeof (void *) *
	    nb_number_memory_controllers * nb_dimms_per_channel);
	dimm_geometry = 0;
	kmem_free(rank_base, sizeof (struct rank_base) *
	    nb_number_memory_controllers * nb_dimms_per_channel * 2);
	rank_base = 0;
}

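/*
 * Record the geometry of a DIMM by matching its row/column/width/bank
 * parameters against the dimm_data table.
 */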
void
dimm_add_geometry(int branch, int dimm, int nbanks, int width, int ncolumn,
    int nrow)
{
	int i;
	for (i = 0; i < dimm_types; i++) {
		if (dimm_data[i].row_nbits == nrow &&
		    dimm_data[i].col_nbits == ncolumn &&
		    dimm_data[i].width == width &&
		    (1 << dimm_data[i].bank_nbits) == nbanks) {
			dimm_geometry[(branch * nb_dimms_per_channel) + dimm] =
			    &dimm_data[i];
			break;
		}
	}
}

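/*
 * Record the base, limit, hole and interleave settings for a rank and
 * attach the rank geometry that corresponds to its interleave width.
 */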
void
dimm_add_rank(int branch, int rank, int branch_interleave, int way,
    uint64_t base, uint32_t hole, uint32_t hole_size, int interleave,
    uint64_t limit)
{
	struct dimm_geometry *dimm;
	struct rank_base *rp;
	int interleave_nbits;

	dimm = dimm_geometry[(branch * nb_dimms_per_channel) + (rank / 2)];
	rp = &rank_base[(branch * nb_dimms_per_channel * 2) + rank];
	if (interleave == 1)
		interleave_nbits = 0;
	else if (interleave == 2)
		interleave_nbits = 1;
	else if (interleave == 4)
		interleave_nbits = 2;
	else
		interleave_nbits = 3;
	rp->branch_interleave = branch_interleave;
	rp->way = way;
	rp->base = base;
	rp->hole = hole;
	rp->hole_size = hole_size;
	rp->interleave = interleave;
	rp->limit = limit;
	if (dimm)
		rp->rank_geometry = &dimm->rank_geometry[interleave_nbits];
	else
		rp->rank_geometry = 0;
}

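/*
 * Memory-controller ops vector handed to the CMI framework;
 * inb_mc_register() attaches it to each CPU handle during the
 * cmi_hdl walk.
 */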
static const cmi_mc_ops_t inb_mc_ops = {
	inb_patounum,
	inb_unumtopa,
	nb_error_trap			/* cmi_mc_logout */
};

/*ARGSUSED*/
int
inb_mc_register(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
{
	cmi_mc_register(hdl, &inb_mc_ops, NULL);
	return (CMI_HDL_WALK_NEXT);
}
339