xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c (revision 9e1e9d660255d7216067193d774f338d08d8528d)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <drm/amdgpu_drm.h>
25 #include "amdgpu.h"
26 #include "atomfirmware.h"
27 #include "amdgpu_atomfirmware.h"
28 #include "atom.h"
29 #include "atombios.h"
30 #include "soc15_hw_ip.h"
31 
/* All firmwareinfo atom data table revisions understood by this file */
union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
	struct atom_firmware_info_v3_5 v35;
};
39 
40 /*
41  * Helper function to query firmware capability
42  *
43  * @adev: amdgpu_device pointer
44  *
45  * Return firmware_capability in firmwareinfo table on success or 0 if not
46  */
47 uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
48 {
49 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
50 	int index;
51 	u16 data_offset, size;
52 	union firmware_info *firmware_info;
53 	u8 frev, crev;
54 	u32 fw_cap = 0;
55 
56 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
57 			firmwareinfo);
58 
59 	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
60 				index, &size, &frev, &crev, &data_offset)) {
61 		/* support firmware_info 3.1 + */
62 		if ((frev == 3 && crev >= 1) || (frev > 3)) {
63 			firmware_info = (union firmware_info *)
64 				(mode_info->atom_context->bios + data_offset);
65 			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
66 		}
67 	}
68 
69 	return fw_cap;
70 }
71 
72 /*
73  * Helper function to query gpu virtualizaiton capability
74  *
75  * @adev: amdgpu_device pointer
76  *
77  * Return true if gpu virtualization is supported or false if not
78  */
79 bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
80 {
81 	u32 fw_cap;
82 
83 	fw_cap = adev->mode_info.firmware_flags;
84 
85 	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
86 }
87 
/*
 * Cache the BIOS scratch register base address from the firmwareinfo
 * table so the driver can access the shared BIOS/driver scratch
 * registers later (e.g. via adev->bios_scratch_reg_offset + N).
 */
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	/* NOTE(review): the table is read through the v3.1 layout without a
	 * revision check — assumes newer revisions keep
	 * bios_scratch_reg_startaddr at the same offset; confirm against
	 * atomfirmware.h when adding new table versions.
	 */
	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}
104 
105 static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
106 	struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
107 {
108 	u32 start_addr, fw_size, drv_size;
109 
110 	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
111 	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
112 	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);
113 
114 	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
115 			  start_addr,
116 			  fw_size,
117 			  drv_size);
118 
119 	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
120 		(u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
121 		ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
122 		/* Firmware request VRAM reservation for SR-IOV */
123 		adev->mman.fw_vram_usage_start_offset = (start_addr &
124 			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
125 		adev->mman.fw_vram_usage_size = fw_size << 10;
126 		/* Use the default scratch size */
127 		*usage_bytes = 0;
128 	} else {
129 		*usage_bytes = drv_size << 10;
130 	}
131 	return 0;
132 }
133 
134 static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
135 		struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
136 {
137 	u32 fw_start_addr, fw_size, drv_start_addr, drv_size;
138 
139 	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
140 	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
141 
142 	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
143 	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);
144 
145 	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
146 			  fw_start_addr,
147 			  fw_size,
148 			  drv_start_addr,
149 			  drv_size);
150 
151 	if (amdgpu_sriov_vf(adev) &&
152 	    ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
153 		ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
154 		/* Firmware request VRAM reservation for SR-IOV */
155 		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
156 			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
157 		adev->mman.fw_vram_usage_size = fw_size << 10;
158 	}
159 
160 	if (amdgpu_sriov_vf(adev) &&
161 	    ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
162 		ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
163 		/* driver request VRAM reservation for SR-IOV */
164 		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
165 			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
166 		adev->mman.drv_vram_usage_size = drv_size << 10;
167 	}
168 
169 	*usage_bytes = 0;
170 	return 0;
171 }
172 
/*
 * amdgpu_atomfirmware_allocate_fb_scratch - honor vbios VRAM reservations and
 * allocate the atom interpreter scratch buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the vram_usagebyfirmware table (when present) via the v2.1/v2.2
 * helpers, then allocates the atom context scratch memory.  Returns 0 on
 * success or -ENOMEM if the scratch allocation fails.
 */
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
	u16 data_offset;
	u8 frev, crev;
	int usage_bytes = 0;

	/* Skip atomfirmware allocation for SRIOV VFs when dynamic crit regn is enabled */
	if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) {
		if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
			if (frev == 2 && crev == 1) {
				fw_usage_v2_1 =
					(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
				amdgpu_atomfirmware_allocate_fb_v2_1(adev,
						fw_usage_v2_1,
						&usage_bytes);
			} else if (frev >= 2 && crev >= 2) {
				/* v2.2 and any later revision are read with the v2.2 layout */
				fw_usage_v2_2 =
					(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
				amdgpu_atomfirmware_allocate_fb_v2_2(adev,
						fw_usage_v2_2,
						&usage_bytes);
			}
		}
	}

	ctx->scratch_size_bytes = 0;
	/* no (or zero) driver usage reported: fall back to the 20 KB default */
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}
213 
/* Supported revisions of the integratedsysteminfo (APU) data table */
union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
	struct atom_integrated_system_info_v2_3 v23;
};
220 
/* Supported revisions of the umc_info (memory controller) data table */
union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
	struct atom_umc_info_v4_0 v40;
};
227 
/* Supported revisions of the vram_info data table header */
union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};
235 
/* Per-module entries embedded in the vram_info table, one layout per revision */
union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};
242 
243 static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
244 					      int atom_mem_type)
245 {
246 	int vram_type;
247 
248 	if (adev->flags & AMD_IS_APU) {
249 		switch (atom_mem_type) {
250 		case Ddr2MemType:
251 		case LpDdr2MemType:
252 			vram_type = AMDGPU_VRAM_TYPE_DDR2;
253 			break;
254 		case Ddr3MemType:
255 		case LpDdr3MemType:
256 			vram_type = AMDGPU_VRAM_TYPE_DDR3;
257 			break;
258 		case Ddr4MemType:
259 			vram_type = AMDGPU_VRAM_TYPE_DDR4;
260 			break;
261 		case LpDdr4MemType:
262 			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
263 			break;
264 		case Ddr5MemType:
265 			vram_type = AMDGPU_VRAM_TYPE_DDR5;
266 			break;
267 		case LpDdr5MemType:
268 			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
269 			break;
270 		default:
271 			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
272 			break;
273 		}
274 	} else {
275 		switch (atom_mem_type) {
276 		case ATOM_DGPU_VRAM_TYPE_GDDR5:
277 			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
278 			break;
279 		case ATOM_DGPU_VRAM_TYPE_HBM2:
280 		case ATOM_DGPU_VRAM_TYPE_HBM2E:
281 		case ATOM_DGPU_VRAM_TYPE_HBM3:
282 			vram_type = AMDGPU_VRAM_TYPE_HBM;
283 			break;
284 		case ATOM_DGPU_VRAM_TYPE_GDDR6:
285 			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
286 			break;
287 		case ATOM_DGPU_VRAM_TYPE_HBM3E:
288 			vram_type = AMDGPU_VRAM_TYPE_HBM3E;
289 			break;
290 		default:
291 			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
292 			break;
293 		}
294 	}
295 
296 	return vram_type;
297 }
298 
/*
 * amdgpu_atomfirmware_get_uma_carveout_info_v2_3 - extract UMA carveout
 * options from an integratedsysteminfo table, read via the v2.3 layout
 *
 * @adev: amdgpu_device pointer (only used for debug logging)
 * @igp_info: integrated system info table
 * @uma_info: output; filled with the carveout option entries
 *
 * Returns 0 on success, -ENODEV when the table reports no options, or
 * -EINVAL when the reported count exceeds MAX_UMA_OPTION_ENTRIES.
 */
static int amdgpu_atomfirmware_get_uma_carveout_info_v2_3(struct amdgpu_device *adev,
							  union igp_info *igp_info,
							  struct amdgpu_uma_carveout_info *uma_info)
{
	struct uma_carveout_option *opts;
	uint8_t nr_uma_options;
	int i;

	nr_uma_options = igp_info->v23.UMACarveoutIndexMax;

	if (!nr_uma_options)
		return -ENODEV;

	/* refuse to copy more entries than the driver-side table can hold */
	if (nr_uma_options > MAX_UMA_OPTION_ENTRIES) {
		drm_dbg(adev_to_drm(adev),
			"Number of UMA options exceeds max table size. Options will not be parsed");
		return -EINVAL;
	}

	uma_info->num_entries = nr_uma_options;
	uma_info->uma_option_index = igp_info->v23.UMACarveoutIndex;

	opts = igp_info->v23.UMASizeControlOption;

	for (i = 0; i < nr_uma_options; i++) {
		/* a zero GB entry is stored as 512 MB; otherwise convert GB to MB */
		if (!opts[i].memoryCarvedGb)
			uma_info->entries[i].memory_carved_mb = 512;
		else
			uma_info->entries[i].memory_carved_mb = (uint32_t)opts[i].memoryCarvedGb << 10;

		uma_info->entries[i].flags = opts[i].uma_carveout_option_flags.all8;
		strscpy(uma_info->entries[i].name, opts[i].optionName, MAX_UMA_OPTION_NAME);
	}

	return 0;
}
335 
336 int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
337 					      struct amdgpu_uma_carveout_info *uma_info)
338 {
339 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
340 	union igp_info *igp_info;
341 	u16 data_offset, size;
342 	u8 frev, crev;
343 	int index;
344 
345 	if (!(adev->flags & AMD_IS_APU))
346 		return -ENODEV;
347 
348 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
349 					    integratedsysteminfo);
350 
351 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context,
352 					  index, &size,
353 					  &frev, &crev, &data_offset)) {
354 		return -EINVAL;
355 	}
356 
357 	igp_info = (union igp_info *)
358 			(mode_info->atom_context->bios + data_offset);
359 
360 	switch (frev) {
361 	case 2:
362 		switch (crev) {
363 		case 3:
364 			return amdgpu_atomfirmware_get_uma_carveout_info_v2_3(adev, igp_info, uma_info);
365 		break;
366 		default:
367 			break;
368 		}
369 		break;
370 	default:
371 		break;
372 	}
373 	return -ENODEV;
374 }
375 
/*
 * amdgpu_atomfirmware_get_integrated_system_info - query APU memory config
 * from the integratedsysteminfo table
 *
 * @adev: amdgpu_device pointer
 * @vram_width: optional output; total memory bus width in bits
 * @vram_type: optional output; AMDGPU_VRAM_TYPE_* value
 * @vram_vendor: unused — this table does not report a vendor id, so the
 *               output is never written
 *
 * Each table revision stores the same logical fields at different offsets,
 * so the per-revision branches cannot share a single union member access.
 * Returns 0 on success or -EINVAL on an unparseable/unknown table.
 */
int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union igp_info *igp_info;
	u8 frev, crev;
	u8 mem_type;
	u32 mem_channel_number;
	u32 mem_channel_width;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (frev) {
		case 1:
			switch (crev) {
			/* v1.11 and v1.12 share the field offsets read here */
			case 11:
			case 12:
				mem_channel_number = igp_info->v11.umachannelnumber;
				/* treat a zero channel count as one channel */
				if (!mem_channel_number)
					mem_channel_number = 1;
				mem_type = igp_info->v11.memorytype;
				/* LPDDR5 uses 32-bit channels; other types 64-bit */
				if (mem_type == LpDdr5MemType)
					mem_channel_width = 32;
				else
					mem_channel_width = 64;
				if (vram_width)
					*vram_width = mem_channel_number * mem_channel_width;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				break;
			default:
				return -EINVAL;
			}
			break;
		case 2:
			switch (crev) {
			/* v2.1 and v2.2 share the field offsets read here */
			case 1:
			case 2:
				mem_channel_number = igp_info->v21.umachannelnumber;
				if (!mem_channel_number)
					mem_channel_number = 1;
				mem_type = igp_info->v21.memorytype;
				if (mem_type == LpDdr5MemType)
					mem_channel_width = 32;
				else
					mem_channel_width = 64;
				if (vram_width)
					*vram_width = mem_channel_number * mem_channel_width;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				break;
			/* v2.3 moved the fields; read through the v23 layout */
			case 3:
				mem_channel_number = igp_info->v23.umachannelnumber;
				if (!mem_channel_number)
					mem_channel_number = 1;
				mem_type = igp_info->v23.memorytype;
				if (mem_type == LpDdr5MemType)
					mem_channel_width = 32;
				else
					mem_channel_width = 64;
				if (vram_width)
					*vram_width = mem_channel_number * mem_channel_width;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				break;
			default:
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}
	return 0;
}
461 
462 int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev,
463 				  int *vram_width, int *vram_type,
464 				  int *vram_vendor)
465 {
466 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
467 	int index;
468 	u16 data_offset, size;
469 	union umc_info *umc_info;
470 	u8 frev, crev;
471 	u8 mem_type;
472 	u8 mem_vendor;
473 	u32 mem_channel_number;
474 	u32 mem_channel_width;
475 
476 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info);
477 
478 	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
479 					  index, &size,
480 					  &frev, &crev, &data_offset)) {
481 		umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
482 
483 		if (frev == 4) {
484 			switch (crev) {
485 			case 0:
486 				mem_channel_number = le32_to_cpu(umc_info->v40.channel_num);
487 				mem_type = le32_to_cpu(umc_info->v40.vram_type);
488 				mem_channel_width = le32_to_cpu(umc_info->v40.channel_width);
489 				mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
490 				if (vram_vendor)
491 					*vram_vendor = mem_vendor;
492 				if (vram_type)
493 					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
494 				if (vram_width)
495 					*vram_width = mem_channel_number * (1 << mem_channel_width);
496 				break;
497 			default:
498 				return -EINVAL;
499 			}
500 		} else {
501 			return -EINVAL;
502 		}
503 	} else {
504 		return -EINVAL;
505 	}
506 
507 	return 0;
508 }
509 
/*
 * amdgpu_atomfirmware_get_vram_info - query memory configuration from the
 * vram_info table (dGPU boards)
 *
 * @adev: amdgpu_device pointer
 * @vram_width: optional output; total memory bus width in bits
 * @vram_type: optional output; AMDGPU_VRAM_TYPE_* value
 * @vram_vendor: optional output; vendor id nibble from the module entry
 *
 * The board's active module id is read from the bios scratch register;
 * for the v2.x tables the variable-size vram_module array is walked entry
 * by entry (each entry records its own size) until that module is reached.
 * Returns 0 on success or -EINVAL on a missing/unknown table revision.
 */
int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		vram_info = (union vram_info *)
			(mode_info->atom_context->bios + data_offset);

		/* active module id: bits 23:16 of the second scratch register */
		module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
		if (frev == 3) {
			switch (crev) {
			/* v30 */
			case 0:
				/* v3.0 keeps channel info in the header, not the module */
				vram_module = (union vram_module *)vram_info->v30.vram_module;
				mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				mem_type = vram_info->v30.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_info->v30.channel_num;
				mem_channel_width = vram_info->v30.channel_width;
				/* v3.0 channels are 16 bits wide */
				if (vram_width)
					*vram_width = mem_channel_number * 16;
				break;
			default:
				return -EINVAL;
			}
		} else if (frev == 2) {
			switch (crev) {
			/* v23 */
			case 3:
				/* out-of-range module id: fall back to module 0 */
				if (module_id > vram_info->v23.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v23.vram_module;
				/* entries are variable-size; step by each entry's own size */
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v9.vram_module_size);
					i++;
				}
				mem_type = vram_module->v9.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v9.channel_num;
				mem_channel_width = vram_module->v9.channel_width;
				/* channel_width is an exponent: width = 2^channel_width bits */
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			/* v24 */
			case 4:
				if (module_id > vram_info->v24.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v24.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v10.vram_module_size);
					i++;
				}
				mem_type = vram_module->v10.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v10.channel_num;
				mem_channel_width = vram_module->v10.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			/* v25 */
			case 5:
				if (module_id > vram_info->v25.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v25.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v11.vram_module_size);
					i++;
				}
				mem_type = vram_module->v11.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v11.channel_num;
				mem_channel_width = vram_module->v11.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			/* v26 */
			case 6:
				/* v2.6 module entries reuse the v9 layout */
				if (module_id > vram_info->v26.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v26.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v9.vram_module_size);
					i++;
				}
				mem_type = vram_module->v9.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v9.channel_num;
				mem_channel_width = vram_module->v9.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			default:
				return -EINVAL;
			}
		} else {
			/* invalid frev */
			return -EINVAL;
		}

	} else {
		return -EINVAL;
	}

	return 0;
}
654 
655 /*
656  * Return true if vbios enabled ecc by default, if umc info table is available
657  * or false if ecc is not enabled or umc info table is not available
658  */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool mem_ecc_enabled = false;
	u8 umc_config;
	u32 umc_config1;
	/* reset the cached vbios default; set again below for v3.3+/v4.0 tables */
	adev->ras_default_ecc_enabled = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
		if (frev == 3) {
			switch (crev) {
			/* v3.1/v3.2: only the default-enable bit exists */
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				mem_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				mem_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			/* v3.3 adds umc_config1 with a separate ECC-capable bit */
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				mem_ecc_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				adev->ras_default_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else if (frev == 4) {
			switch (crev) {
			/* v4.0: capability and default-enable are separate bits */
			case 0:
				umc_config = le32_to_cpu(umc_info->v40.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
				mem_ecc_enabled =
					(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
				adev->ras_default_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else {
			/* unsupported frev */
			return false;
		}
	}

	return mem_ecc_enabled;
}
724 
725 /*
726  * Helper function to query sram ecc capablity
727  *
728  * @adev: amdgpu_device pointer
729  *
730  * Return true if vbios supports sram ecc or false if not
731  */
732 bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
733 {
734 	u32 fw_cap;
735 
736 	fw_cap = adev->mode_info.firmware_flags;
737 
738 	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
739 }
740 
741 /*
742  * Helper function to query dynamic boot config capability
743  *
744  * @adev: amdgpu_device pointer
745  *
746  * Return true if vbios supports dynamic boot config or false if not
747  */
748 bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
749 {
750 	u32 fw_cap;
751 
752 	fw_cap = adev->mode_info.firmware_flags;
753 
754 	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
755 }
756 
757 /**
758  * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
759  * @adev: amdgpu_device pointer
760  * @i2c_address: pointer to u8; if not NULL, will contain
761  *    the RAS EEPROM address if the function returns true
762  *
763  * Return true if VBIOS supports RAS EEPROM address reporting,
764  * else return false. If true and @i2c_address is not NULL,
765  * will contain the RAS ROM address.
766  */
767 bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
768 				      u8 *i2c_address)
769 {
770 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
771 	int index;
772 	u16 data_offset, size;
773 	union firmware_info *firmware_info;
774 	u8 frev, crev;
775 
776 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
777 					    firmwareinfo);
778 
779 	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
780 					  index, &size, &frev, &crev,
781 					  &data_offset)) {
782 		/* support firmware_info 3.4 + */
783 		if ((frev == 3 && crev >= 4) || (frev > 3)) {
784 			firmware_info = (union firmware_info *)
785 				(mode_info->atom_context->bios + data_offset);
786 			/* The ras_rom_i2c_slave_addr should ideally
787 			 * be a 19-bit EEPROM address, which would be
788 			 * used as is by the driver; see top of
789 			 * amdgpu_eeprom.c.
790 			 *
791 			 * When this is the case, 0 is of course a
792 			 * valid RAS EEPROM address, in which case,
793 			 * we'll drop the first "if (firm...)" and only
794 			 * leave the check for the pointer.
795 			 *
796 			 * The reason this works right now is because
797 			 * ras_rom_i2c_slave_addr contains the EEPROM
798 			 * device type qualifier 1010b in the top 4
799 			 * bits.
800 			 */
801 			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
802 				if (i2c_address)
803 					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
804 				return true;
805 			}
806 		}
807 	}
808 
809 	return false;
810 }
811 
812 
/* Supported revisions of the smu_info data table */
union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};
817 
/* Supported revisions of the gfx_info data table */
union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};
824 
825 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
826 {
827 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
828 	struct amdgpu_pll *spll = &adev->clock.spll;
829 	struct amdgpu_pll *mpll = &adev->clock.mpll;
830 	uint8_t frev, crev;
831 	uint16_t data_offset;
832 	int ret = -EINVAL, index;
833 
834 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
835 					    firmwareinfo);
836 	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
837 				   &frev, &crev, &data_offset)) {
838 		union firmware_info *firmware_info =
839 			(union firmware_info *)(mode_info->atom_context->bios +
840 						data_offset);
841 
842 		adev->clock.default_sclk =
843 			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
844 		adev->clock.default_mclk =
845 			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
846 
847 		adev->pm.current_sclk = adev->clock.default_sclk;
848 		adev->pm.current_mclk = adev->clock.default_mclk;
849 
850 		ret = 0;
851 	}
852 
853 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
854 					    smu_info);
855 	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
856 				   &frev, &crev, &data_offset)) {
857 		union smu_info *smu_info =
858 			(union smu_info *)(mode_info->atom_context->bios +
859 					   data_offset);
860 
861 		/* system clock */
862 		if (frev == 3)
863 			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
864 		else if (frev == 4)
865 			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);
866 
867 		spll->reference_div = 0;
868 		spll->min_post_div = 1;
869 		spll->max_post_div = 1;
870 		spll->min_ref_div = 2;
871 		spll->max_ref_div = 0xff;
872 		spll->min_feedback_div = 4;
873 		spll->max_feedback_div = 0xff;
874 		spll->best_vco = 0;
875 
876 		ret = 0;
877 	}
878 
879 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
880 					    umc_info);
881 	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
882 				   &frev, &crev, &data_offset)) {
883 		union umc_info *umc_info =
884 			(union umc_info *)(mode_info->atom_context->bios +
885 					   data_offset);
886 
887 		/* memory clock */
888 		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);
889 
890 		mpll->reference_div = 0;
891 		mpll->min_post_div = 1;
892 		mpll->max_post_div = 1;
893 		mpll->min_ref_div = 2;
894 		mpll->max_ref_div = 0xff;
895 		mpll->min_feedback_div = 4;
896 		mpll->max_feedback_div = 0xff;
897 		mpll->best_vco = 0;
898 
899 		ret = 0;
900 	}
901 
902 	/* if asic is Navi+, the rlc reference clock is used for system clock
903 	 * from vbios gfx_info table */
904 	if (adev->asic_type >= CHIP_NAVI10) {
905 		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
906 						   gfx_info);
907 		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
908 					  &frev, &crev, &data_offset)) {
909 			union gfx_info *gfx_info = (union gfx_info *)
910 				(mode_info->atom_context->bios + data_offset);
911 			if ((frev == 3) ||
912 			    (frev == 2 && crev == 6)) {
913 				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
914 				ret = 0;
915 			} else if ((frev == 2) &&
916 				   (crev >= 2) &&
917 				   (crev != 6)) {
918 				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
919 				ret = 0;
920 			} else {
921 				BUG();
922 			}
923 		}
924 	}
925 
926 	return ret;
927 }
928 
/*
 * Helper function to populate the gfx configuration in @adev from the
 * VBIOS gfx_info data table.
 *
 * @adev: amdgpu_device pointer
 *
 * Fills adev->gfx.config (and, for frev 2 tables, adev->gfx.cu_info)
 * from whichever table revision the VBIOS carries.
 *
 * Return 0 on success, -EINVAL if the table is absent or its
 * frev/crev combination is not handled here.
 */
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	/* Locate the gfx_info data table inside the VBIOS image. */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		/* Overlay the versioned table union on the raw BIOS bytes. */
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				/* Table layout v2.4: copy shader-engine topology
				 * and CU parameters field by field. */
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				/* Multi-byte table fields are little-endian. */
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				/* Table layout v2.7: same set of fields as v2.4,
				 * read from the v27 struct. */
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				/* Table layout v3.0 carries only the shader-engine
				 * topology; cu_info is not present here. */
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}

	}
	return -EINVAL;
}
1000 
1001 /*
1002  * Helper function to query two stage mem training capability
1003  *
1004  * @adev: amdgpu_device pointer
1005  *
1006  * Return true if two stage mem training is supported or false if not
1007  */
1008 bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
1009 {
1010 	u32 fw_cap;
1011 
1012 	fw_cap = adev->mode_info.firmware_flags;
1013 
1014 	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
1015 }
1016 
1017 int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
1018 {
1019 	struct atom_context *ctx = adev->mode_info.atom_context;
1020 	union firmware_info *firmware_info;
1021 	int index;
1022 	u16 data_offset, size;
1023 	u8 frev, crev;
1024 	int fw_reserved_fb_size;
1025 
1026 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
1027 			firmwareinfo);
1028 
1029 	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
1030 				&frev, &crev, &data_offset))
1031 		/* fail to parse data_header */
1032 		return 0;
1033 
1034 	firmware_info = (union firmware_info *)(ctx->bios + data_offset);
1035 
1036 	if (frev != 3)
1037 		return -EINVAL;
1038 
1039 	switch (crev) {
1040 	case 4:
1041 		fw_reserved_fb_size =
1042 			(firmware_info->v34.fw_reserved_size_in_kb << 10);
1043 		break;
1044 	case 5:
1045 		fw_reserved_fb_size =
1046 			(firmware_info->v35.fw_reserved_size_in_kb << 10);
1047 		break;
1048 	default:
1049 		fw_reserved_fb_size = 0;
1050 		break;
1051 	}
1052 
1053 	return fw_reserved_fb_size;
1054 }
1055 
1056 /*
1057  * Helper function to execute asic_init table
1058  *
1059  * @adev: amdgpu_device pointer
1060  * @fb_reset: flag to indicate whether fb is reset or not
1061  *
1062  * Return 0 if succeed, otherwise failed
1063  */
1064 int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
1065 {
1066 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
1067 	struct atom_context *ctx;
1068 	uint8_t frev, crev;
1069 	uint16_t data_offset;
1070 	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
1071 	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
1072 	int index;
1073 
1074 	if (!mode_info)
1075 		return -EINVAL;
1076 
1077 	ctx = mode_info->atom_context;
1078 	if (!ctx)
1079 		return -EINVAL;
1080 
1081 	/* query bootup sclk/mclk from firmware_info table */
1082 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
1083 					    firmwareinfo);
1084 	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
1085 				&frev, &crev, &data_offset)) {
1086 		union firmware_info *firmware_info =
1087 			(union firmware_info *)(ctx->bios +
1088 						data_offset);
1089 
1090 		bootup_sclk_in10khz =
1091 			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
1092 		bootup_mclk_in10khz =
1093 			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
1094 	} else {
1095 		return -EINVAL;
1096 	}
1097 
1098 	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
1099 					asic_init);
1100 	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
1101 		if (frev == 2 && crev >= 1) {
1102 			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
1103 			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
1104 			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
1105 			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
1106 			if (!fb_reset)
1107 				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
1108 			else
1109 				asic_init_ps_v2_1.param.memparam.memflag = 0;
1110 		} else {
1111 			return -EINVAL;
1112 		}
1113 	} else {
1114 		return -EINVAL;
1115 	}
1116 
1117 	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1,
1118 		sizeof(asic_init_ps_v2_1));
1119 }
1120