xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c (revision 6704d98a4f48b7424edc0f7ae2a06c0a8af02e2f)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 
27 #include <linux/io-64-nonatomic-lo-hi.h>
28 #ifdef CONFIG_X86
29 #include <asm/hypervisor.h>
30 #endif
31 
32 #include "amdgpu.h"
33 #include "amdgpu_gmc.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_reset.h"
36 #include "amdgpu_xgmi.h"
37 
38 #include <drm/drm_drv.h>
39 #include <drm/ttm/ttm_tt.h>
40 
41 static const u64 four_gb = 0x100000000ULL;
42 
43 bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
44 {
45 	return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev);
46 }
47 
48 /**
49  * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
50  *
51  * @adev: amdgpu_device pointer
52  *
53  * Allocate video memory for pdb0 and map it for CPU access.
54  * Returns 0 for success, error for failure.
55  */
56 int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
57 {
58 	int r;
59 	struct amdgpu_bo_param bp;
60 	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
61 	uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
62 	uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;
63 
64 	memset(&bp, 0, sizeof(bp));
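	/* One 8 byte PDE0 entry per (1 << pde0_page_shift) bytes of the hive's
	 * VRAM, plus one extra entry that amdgpu_gmc_init_pdb0() later points
	 * at the GART page table.
	 */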
65 	bp.size = PAGE_ALIGN((npdes + 1) * 8);
66 	bp.byte_align = PAGE_SIZE;
67 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
68 	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
69 		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
70 	bp.type = ttm_bo_type_kernel;
71 	bp.resv = NULL;
72 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
73 
74 	r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
75 	if (r)
76 		return r;
77 
78 	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
79 	if (unlikely(r != 0))
80 		goto bo_reserve_failure;
81 
82 	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
83 	if (r)
84 		goto bo_pin_failure;
85 	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
86 	if (r)
87 		goto bo_kmap_failure;
88 
89 	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
90 	return 0;
91 
92 bo_kmap_failure:
93 	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
94 bo_pin_failure:
95 	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
96 bo_reserve_failure:
97 	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
98 	return r;
99 }
100 
101 /**
102  * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
103  *
104  * @bo: the BO to get the PDE for
105  * @level: the level in the PD hierarchy
106  * @addr: resulting addr
107  * @flags: resulting flags
108  *
109  * Get the address and flags to be used for a PDE (Page Directory Entry).
110  */
111 void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
112 			       uint64_t *addr, uint64_t *flags)
113 {
114 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
115 
116 	switch (bo->tbo.resource->mem_type) {
117 	case TTM_PL_TT:
118 		*addr = bo->tbo.ttm->dma_address[0];
119 		break;
120 	case TTM_PL_VRAM:
121 		*addr = amdgpu_bo_gpu_offset(bo);
122 		break;
123 	default:
124 		*addr = 0;
125 		break;
126 	}
127 	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
128 	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
129 }
130 
131 /*
132  * amdgpu_gmc_pd_addr - return the address of the root directory
133  */
134 uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
135 {
136 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
137 	uint64_t pd_addr;
138 
139 	/* TODO: move that into ASIC specific code */
140 	if (adev->asic_type >= CHIP_VEGA10) {
141 		uint64_t flags = AMDGPU_PTE_VALID;
142 
143 		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
144 		pd_addr |= flags;
145 	} else {
146 		pd_addr = amdgpu_bo_gpu_offset(bo);
147 	}
148 	return pd_addr;
149 }
150 
151 /**
152  * amdgpu_gmc_set_pte_pde - update the page tables using CPU
153  *
154  * @adev: amdgpu_device pointer
155  * @cpu_pt_addr: cpu address of the page table
156  * @gpu_page_idx: entry in the page table to update
157  * @addr: dst addr to write into pte/pde
158  * @flags: access flags
159  *
160  * Update the page tables using CPU.
161  */
162 int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
163 				uint32_t gpu_page_idx, uint64_t addr,
164 				uint64_t flags)
165 {
166 	void __iomem *ptr = (void *)cpu_pt_addr;
167 	uint64_t value;
168 
169 	/*
170 	 * The following is for PTE only. GART does not have PDEs.
171 	 */
172 	value = addr & 0x0000FFFFFFFFF000ULL;
173 	value |= flags;
174 	writeq(value, ptr + (gpu_page_idx * 8));
175 
176 	return 0;
177 }
178 
179 /**
180  * amdgpu_gmc_agp_addr - return the address in the AGP address space
181  *
182  * @bo: TTM BO which needs the address, must be in GTT domain
183  *
184  * Tries to figure out how to access the BO through the AGP aperture. Returns
185  * AMDGPU_BO_INVALID_OFFSET if that is not possible.
186  */
187 uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
188 {
189 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
190 
191 	if (!bo->ttm)
192 		return AMDGPU_BO_INVALID_OFFSET;
193 
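	/* The AGP aperture is only used for single page, uncached BOs, and the
	 * page has to fit inside the aperture window.
	 */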
194 	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
195 		return AMDGPU_BO_INVALID_OFFSET;
196 
197 	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
198 		return AMDGPU_BO_INVALID_OFFSET;
199 
200 	return adev->gmc.agp_start + bo->ttm->dma_address[0];
201 }
202 
203 /**
204  * amdgpu_gmc_vram_location - try to find VRAM location
205  *
206  * @adev: amdgpu device structure holding all necessary information
207  * @mc: memory controller structure holding memory information
208  * @base: base address at which to put VRAM
209  *
210  * Function will try to place VRAM at the base address provided
211  * as parameter.
212  */
213 void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
214 			      u64 base)
215 {
216 	uint64_t vis_limit = (uint64_t)amdgpu_vis_vram_limit << 20;
217 	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
218 
219 	mc->vram_start = base;
220 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
221 	if (limit < mc->real_vram_size)
222 		mc->real_vram_size = limit;
223 
224 	if (vis_limit && vis_limit < mc->visible_vram_size)
225 		mc->visible_vram_size = vis_limit;
226 
227 	if (mc->real_vram_size < mc->visible_vram_size)
228 		mc->visible_vram_size = mc->real_vram_size;
229 
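	/* Without XGMI the FB aperture is simply this device's VRAM range */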
230 	if (mc->xgmi.num_physical_nodes == 0) {
231 		mc->fb_start = mc->vram_start;
232 		mc->fb_end = mc->vram_end;
233 	}
234 	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
235 			mc->mc_vram_size >> 20, mc->vram_start,
236 			mc->vram_end, mc->real_vram_size >> 20);
237 }
238 
239 /** amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
240  *
241  * @adev: amdgpu device structure holding all necessary information
242  * @mc: memory controller structure holding memory information
243  *
244  * This function is only used when GART is used for FB translation. In
245  * that case, we use the sysvm aperture (vmid0 page tables) for both vram
246  * and gart (aka system memory) access.
247  *
248  * GPUVM (and our organization of vmid0 page tables) requires the sysvm
249  * aperture to be placed at a location aligned to 8 times the native
250  * page size. For example, if vm_context0_cntl.page_table_block_size
251  * is 12, then the native page size is 8G (2M * 2^12) and sysvm should
252  * start at a 64G aligned address. For simplicity, we just put sysvm at
253  * address 0. So vram starts at address 0 and gart is right after vram.
254  */
255 void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
256 {
257 	u64 hive_vram_start = 0;
258 	u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
259 	mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
260 	mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
261 	/* node_segment_size may not be 4GB aligned on SRIOV, so align up. */
262 	mc->gart_start = ALIGN(hive_vram_end + 1, four_gb);
263 	mc->gart_end = mc->gart_start + mc->gart_size - 1;
264 	if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
265 		/* set mc->vram_start to 0 to switch the returned GPU address of
266 		 * amdgpu_bo_create_reserved() from FB aperture to GART aperture.
267 		 */
268 		mc->vram_start = 0;
269 		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
270 		mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size);
271 	} else {
272 		mc->fb_start = hive_vram_start;
273 		mc->fb_end = hive_vram_end;
274 	}
275 	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
276 			mc->mc_vram_size >> 20, mc->vram_start,
277 			mc->vram_end, mc->real_vram_size >> 20);
278 	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
279 			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
280 }
281 
282 /**
283  * amdgpu_gmc_gart_location - try to find GART location
284  *
285  * @adev: amdgpu device structure holding all necessary information
286  * @mc: memory controller structure holding memory information
287  * @gart_placement: GART placement policy with respect to VRAM
288  *
289  * Function will try to place GART before or after VRAM.
290  * If the GART size is bigger than the space left, then we adjust the GART size.
291  * Thus this function will never fail.
292  */
293 void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
294 			      enum amdgpu_gart_placement gart_placement)
295 {
296 	u64 size_af, size_bf;
297 	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
298 	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
299 
300 	/* VCE doesn't like it when BOs cross a 4GB segment, so align
301 	 * the GART base on a 4GB boundary as well.
302 	 */
303 	size_bf = mc->fb_start;
304 	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);
305 
306 	if (mc->gart_size > max(size_bf, size_af)) {
307 		dev_warn(adev->dev, "limiting GART\n");
308 		mc->gart_size = max(size_bf, size_af);
309 	}
310 
311 	switch (gart_placement) {
312 	case AMDGPU_GART_PLACEMENT_HIGH:
313 		mc->gart_start = max_mc_address - mc->gart_size + 1;
314 		break;
315 	case AMDGPU_GART_PLACEMENT_LOW:
316 		mc->gart_start = 0;
317 		break;
318 	case AMDGPU_GART_PLACEMENT_BEST_FIT:
319 	default:
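		/* Use the hole below the FB if it fits the GART and is the
		 * smaller of the two, or if the hole above cannot hold the
		 * GART; otherwise place the GART at the top.
		 */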
320 		if ((size_bf >= mc->gart_size && size_bf < size_af) ||
321 		    (size_af < mc->gart_size))
322 			mc->gart_start = 0;
323 		else
324 			mc->gart_start = max_mc_address - mc->gart_size + 1;
325 		break;
326 	}
327 
328 	mc->gart_start &= ~(four_gb - 1);
329 	mc->gart_end = mc->gart_start + mc->gart_size - 1;
330 	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
331 			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
332 }
333 
334 /**
335  * amdgpu_gmc_agp_location - try to find AGP location
336  * @adev: amdgpu device structure holding all necessary information
337  * @mc: memory controller structure holding memory information
338  *
339  * Function will try to find a place for the AGP BAR in the MC address
340  * space.
341  *
342  * AGP BAR will be assigned the largest available hole in the address space.
343  * Should be called after VRAM and GART locations are setup.
344  */
345 void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
346 {
347 	const uint64_t sixteen_gb = 1ULL << 34;
348 	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
349 	u64 size_af, size_bf;
350 
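	/* Measure the 16GB aligned holes below and above the FB, taking into
	 * account whether the GART aperture sits below or above it.
	 */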
351 	if (mc->fb_start > mc->gart_start) {
352 		size_bf = (mc->fb_start & sixteen_gb_mask) -
353 			ALIGN(mc->gart_end + 1, sixteen_gb);
354 		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
355 	} else {
356 		size_bf = mc->fb_start & sixteen_gb_mask;
357 		size_af = (mc->gart_start & sixteen_gb_mask) -
358 			ALIGN(mc->fb_end + 1, sixteen_gb);
359 	}
360 
361 	if (size_bf > size_af) {
362 		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
363 		mc->agp_size = size_bf;
364 	} else {
365 		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
366 		mc->agp_size = size_af;
367 	}
368 
369 	mc->agp_end = mc->agp_start + mc->agp_size - 1;
370 	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
371 			mc->agp_size >> 20, mc->agp_start, mc->agp_end);
372 }
373 
374 /**
375  * amdgpu_gmc_set_agp_default - Set the default AGP aperture value.
376  * @adev: amdgpu device structure holding all necessary information
377  * @mc: memory controller structure holding memory information
378  *
379  * To disable the AGP aperture, you need to set the start to a larger
380  * value than the end.  This function sets the default value which
381  * can then be overridden using amdgpu_gmc_agp_location() if you want
382  * to enable the AGP aperture on a specific chip.
383  *
384  */
385 void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev,
386 				struct amdgpu_gmc *mc)
387 {
388 	mc->agp_start = 0xffffffffffff;
389 	mc->agp_end = 0;
390 	mc->agp_size = 0;
391 }
392 
393 /**
394  * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid
395  *
396  * @addr: 48 bit physical address, page aligned (36 significant bits)
397  * @pasid: 16 bit process address space identifier
398  */
399 static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
400 {
401 	return addr << 4 | pasid;
402 }
403 
404 /**
405  * amdgpu_gmc_filter_faults - filter VM faults
406  *
407  * @adev: amdgpu device structure
408  * @ih: interrupt ring that the fault was received from
409  * @addr: address of the VM fault
410  * @pasid: PASID of the process causing the fault
411  * @timestamp: timestamp of the fault
412  *
413  * Returns:
414  * True if the fault was filtered and should not be processed further.
415  * False if the fault is a new one and needs to be handled.
416  */
417 bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
418 			      struct amdgpu_ih_ring *ih, uint64_t addr,
419 			      uint16_t pasid, uint64_t timestamp)
420 {
421 	struct amdgpu_gmc *gmc = &adev->gmc;
422 	uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid);
423 	struct amdgpu_gmc_fault *fault;
424 	uint32_t hash;
425 
426 	/* Stale retry fault if timestamp goes backward */
427 	if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp))
428 		return true;
429 
430 	/* If we don't have space left in the ring buffer return immediately */
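	/* stamp is the oldest timestamp still considered live; the max() keeps
	 * it from wrapping around for very small timestamps.
	 */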
431 	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
432 		AMDGPU_GMC_FAULT_TIMEOUT;
433 	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
434 		return true;
435 
436 	/* Try to find the fault in the hash */
437 	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
438 	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
439 	while (fault->timestamp >= stamp) {
440 		uint64_t tmp;
441 
442 		if (atomic64_read(&fault->key) == key) {
443 			/*
444 			 * if we get a fault which is already present in
445 			 * the fault_ring and the timestamp of
446 			 * the fault is after the expired timestamp,
447 			 * then this is a new fault that needs to be added
448 			 * into the fault ring.
449 			 */
450 			if (fault->timestamp_expiry != 0 &&
451 			    amdgpu_ih_ts_after(fault->timestamp_expiry,
452 					       timestamp))
453 				break;
454 			else
455 				return true;
456 		}
457 
458 		tmp = fault->timestamp;
459 		fault = &gmc->fault_ring[fault->next];
460 
461 		/* Check if the entry was reused */
462 		if (fault->timestamp >= tmp)
463 			break;
464 	}
465 
466 	/* Add the fault to the ring */
467 	fault = &gmc->fault_ring[gmc->last_fault];
468 	atomic64_set(&fault->key, key);
469 	fault->timestamp = timestamp;
470 
471 	/* And update the hash */
472 	fault->next = gmc->fault_hash[hash].idx;
473 	gmc->fault_hash[hash].idx = gmc->last_fault++;
474 	return false;
475 }
476 
477 /**
478  * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter
479  *
480  * @adev: amdgpu device structure
481  * @addr: address of the VM fault
482  * @pasid: PASID of the process causing the fault
483  *
484  * Remove the address from the fault filter, so that a future vm fault on this
485  * address will be passed to the retry fault handler to recover.
486  */
487 void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
488 				     uint16_t pasid)
489 {
490 	struct amdgpu_gmc *gmc = &adev->gmc;
491 	uint64_t key = amdgpu_gmc_fault_key(addr, pasid);
492 	struct amdgpu_ih_ring *ih;
493 	struct amdgpu_gmc_fault *fault;
494 	uint32_t last_wptr;
495 	uint64_t last_ts;
496 	uint32_t hash;
497 	uint64_t tmp;
498 
499 	if (adev->irq.retry_cam_enabled)
500 		return;
501 
502 	ih = &adev->irq.ih1;
503 	/* Get the WPTR of the last entry in IH ring */
504 	last_wptr = amdgpu_ih_get_wptr(adev, ih);
505 	/* Order wptr with ring data. */
506 	rmb();
507 	/* Get the timestamp of the last entry in IH ring */
508 	last_ts = amdgpu_ih_decode_iv_ts(adev, ih, last_wptr, -1);
509 
510 	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
511 	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
512 	do {
513 		if (atomic64_read(&fault->key) == key) {
514 			/*
515 			 * Update the timestamp when this fault
516 			 * expired.
517 			 */
518 			fault->timestamp_expiry = last_ts;
519 			break;
520 		}
521 
522 		tmp = fault->timestamp;
523 		fault = &gmc->fault_ring[fault->next];
524 	} while (fault->timestamp < tmp);
525 }
526 
527 int amdgpu_gmc_handle_retry_fault(struct amdgpu_device *adev,
528 				  struct amdgpu_iv_entry *entry,
529 				  u64 addr,
530 				  u32 cam_index,
531 				  u32 node_id,
532 				  bool write_fault)
533 {
534 	int ret;
535 
536 	if (adev->irq.retry_cam_enabled) {
537 		/* Delegate it to a different ring if the hardware hasn't
538 		 * already done it.
539 		 */
540 		if (entry->ih == &adev->irq.ih) {
541 			amdgpu_irq_delegate(adev, entry, 8);
542 			return 1;
543 		}
544 
545 		ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
546 					     addr, entry->timestamp, write_fault);
547 		WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
548 		if (ret)
549 			return 1;
550 	} else {
551 		/* Process it only if it's the first fault for this address */
552 		if (entry->ih != &adev->irq.ih_soft &&
553 		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
554 					     entry->timestamp))
555 			return 1;
556 
557 		/* Delegate it to a different ring if the hardware hasn't
558 		 * already done it.
559 		 */
560 		if (entry->ih == &adev->irq.ih) {
561 			amdgpu_irq_delegate(adev, entry, 8);
562 			return 1;
563 		}
564 
565 		/* Try to handle the recoverable page faults by filling page
566 		 * tables
567 		 */
568 		if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
569 					   addr, entry->timestamp, write_fault))
570 			return 1;
571 	}
572 	return 0;
573 }
574 
575 int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev)
576 {
577 	int r;
578 
579 	/* umc ras block */
580 	r = amdgpu_umc_ras_sw_init(adev);
581 	if (r)
582 		return r;
583 
584 	/* mmhub ras block */
585 	r = amdgpu_mmhub_ras_sw_init(adev);
586 	if (r)
587 		return r;
588 
589 	/* hdp ras block */
590 	r = amdgpu_hdp_ras_sw_init(adev);
591 	if (r)
592 		return r;
593 
594 	/* mca.x ras block */
595 	r = amdgpu_mca_mp0_ras_sw_init(adev);
596 	if (r)
597 		return r;
598 
599 	r = amdgpu_mca_mp1_ras_sw_init(adev);
600 	if (r)
601 		return r;
602 
603 	r = amdgpu_mca_mpio_ras_sw_init(adev);
604 	if (r)
605 		return r;
606 
607 	/* xgmi ras block */
608 	r = amdgpu_xgmi_ras_sw_init(adev);
609 	if (r)
610 		return r;
611 
612 	return 0;
613 }
614 
615 int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
616 {
617 	return 0;
618 }
619 
620 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
621 {
622 
623 }
624 
625 	/*
626 	 * The latest engine allocation on gfx9/10 is:
627 	 * Engine 2, 3: firmware
628 	 * Engine 0, 1, 4~16: amdgpu ring,
629 	 *                    subject to change when ring number changes
630 	 * Engine 17: Gart flushes
631 	 */
632 #define AMDGPU_VMHUB_INV_ENG_BITMAP		0x1FFF3
633 
634 int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
635 {
636 	struct amdgpu_ring *ring;
637 	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {0};
638 	unsigned i;
639 	unsigned vmhub, inv_eng;
640 	struct amdgpu_ring *shared_ring;
641 
642 	/* init the vm inv eng for all vmhubs */
643 	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
644 		vm_inv_engs[i] = AMDGPU_VMHUB_INV_ENG_BITMAP;
645 		/* reserve engine 5 for firmware */
646 		if (adev->enable_mes)
647 			vm_inv_engs[i] &= ~(1 << 5);
648 		/* reserve engine 6 for uni mes */
649 		if (adev->enable_uni_mes)
650 			vm_inv_engs[i] &= ~(1 << 6);
651 		/* reserve mmhub engine 3 for firmware */
652 		if (adev->enable_umsch_mm)
653 			vm_inv_engs[i] &= ~(1 << 3);
654 	}
655 
656 	for (i = 0; i < adev->num_rings; ++i) {
657 		ring = adev->rings[i];
658 		vmhub = ring->vm_hub;
659 
660 		if (ring == &adev->mes.ring[0] ||
661 		    ring == &adev->mes.ring[1] ||
662 		    ring == &adev->umsch_mm.ring ||
663 		    ring == &adev->cper.ring_buf)
664 			continue;
665 
666 		/* Skip if the ring is a shared ring */
667 		if (amdgpu_sdma_is_shared_inv_eng(adev, ring))
668 			continue;
669 
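		/* ffs() is 1-based, so engine 0 maps to a return value of 1 and
		 * a result of 0 means no engine is left on this hub.
		 */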
670 		inv_eng = ffs(vm_inv_engs[vmhub]);
671 		if (!inv_eng) {
672 			dev_err(adev->dev, "no VM inv eng for ring %s\n",
673 				ring->name);
674 			return -EINVAL;
675 		}
676 
677 		ring->vm_inv_eng = inv_eng - 1;
678 		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
679 
680 		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
681 			 ring->name, ring->vm_inv_eng, ring->vm_hub);
682 		/* SDMA has a special packet which allows it to use the same
683 		 * invalidation engine for all the rings in one instance.
684 		 * Therefore, we do not allocate a separate VM invalidation engine
685 		 * for SDMA page rings. Instead, they share the VM invalidation
686 		 * engine with the SDMA gfx ring. This change ensures efficient
687 		 * resource management and avoids the issue of insufficient VM
688 		 * invalidation engines.
689 		 */
690 		shared_ring = amdgpu_sdma_get_shared_ring(adev, ring);
691 		if (shared_ring) {
692 			shared_ring->vm_inv_eng = ring->vm_inv_eng;
693 			dev_info(adev->dev, "ring %s shares VM invalidation engine %u with ring %s on hub %u\n",
694 					ring->name, ring->vm_inv_eng, shared_ring->name, ring->vm_hub);
695 			continue;
696 		}
697 	}
698 
699 	return 0;
700 }
701 
702 void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
703 			      uint32_t vmhub, uint32_t flush_type)
704 {
705 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
706 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
707 	struct dma_fence *fence;
708 	struct amdgpu_job *job;
709 	int r;
710 
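	/* Use the GMC callback directly unless this hub needs the SDMA
	 * invalidation workaround for GART (vmid 0) and SDMA is actually
	 * available to perform it.
	 */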
711 	if (!hub->sdma_invalidation_workaround || vmid ||
712 	    !adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready ||
713 	    !ring->sched.ready) {
714 		/*
715 		 * A GPU reset should flush all TLBs anyway, so no need to do
716 		 * this while one is ongoing.
717 		 */
718 		if (!down_read_trylock(&adev->reset_domain->sem))
719 			return;
720 
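		/* Some ASICs need an additional type 2 flush, and/or a type 0
		 * flush before a type 2 flush, as indicated by the
		 * flush_tlb_needs_extra_type_* flags.
		 */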
721 		if (adev->gmc.flush_tlb_needs_extra_type_2)
722 			adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid,
723 							   vmhub, 2);
724 
725 		if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2)
726 			adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid,
727 							   vmhub, 0);
728 
729 		adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub,
730 						   flush_type);
731 		up_read(&adev->reset_domain->sem);
732 		return;
733 	}
734 
735 	/* The SDMA on Navi 1x has a bug which can theoretically result in memory
736 	 * corruption if an invalidation happens at the same time as a VA
737 	 * translation. Avoid this by doing the invalidation from the SDMA
738 	 * itself at least for GART.
739 	 */
740 	mutex_lock(&adev->mman.gtt_window_lock);
741 	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.default_entity.base,
742 				     AMDGPU_FENCE_OWNER_UNDEFINED,
743 				     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
744 				     &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
745 	if (r)
746 		goto error_alloc;
747 
748 	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
749 	job->vm_needs_flush = true;
750 	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
751 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
752 	fence = amdgpu_job_submit(job);
753 	mutex_unlock(&adev->mman.gtt_window_lock);
754 
755 	dma_fence_wait(fence, false);
756 	dma_fence_put(fence);
757 
758 	return;
759 
760 error_alloc:
761 	mutex_unlock(&adev->mman.gtt_window_lock);
762 	dev_err(adev->dev, "Error flushing GPU TLB using the SDMA (%d)!\n", r);
763 }
764 
765 int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
766 				   uint32_t flush_type, bool all_hub,
767 				   uint32_t inst)
768 {
769 	struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
770 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
771 	unsigned int ndw;
772 	int r, cnt = 0;
773 	uint32_t seq;
774 
775 	/*
776 	 * A GPU reset should flush all TLBs anyway, so no need to do
777 	 * this while one is ongoing.
778 	 */
779 	if (!down_read_trylock(&adev->reset_domain->sem))
780 		return 0;
781 
782 	if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
783 
784 		if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid) {
785 			r = 0;
786 			goto error_unlock_reset;
787 		}
788 
789 		if (adev->gmc.flush_tlb_needs_extra_type_2)
790 			adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
791 								 2, all_hub,
792 								 inst);
793 
794 		if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2)
795 			adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
796 								 0, all_hub,
797 								 inst);
798 
799 		adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
800 							 flush_type, all_hub,
801 							 inst);
802 		r = 0;
803 	} else {
804 		/* 2 dwords flush + 8 dwords fence */
805 		ndw = kiq->pmf->invalidate_tlbs_size + 8;
806 
807 		if (adev->gmc.flush_tlb_needs_extra_type_2)
808 			ndw += kiq->pmf->invalidate_tlbs_size;
809 
810 		if (adev->gmc.flush_tlb_needs_extra_type_0)
811 			ndw += kiq->pmf->invalidate_tlbs_size;
812 
813 		spin_lock(&adev->gfx.kiq[inst].ring_lock);
814 		r = amdgpu_ring_alloc(ring, ndw);
815 		if (r) {
816 			spin_unlock(&adev->gfx.kiq[inst].ring_lock);
817 			goto error_unlock_reset;
818 		}
819 		if (adev->gmc.flush_tlb_needs_extra_type_2)
820 			kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
821 
822 		if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
823 			kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
824 
825 		kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
826 		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
827 		if (r) {
828 			amdgpu_ring_undo(ring);
829 			spin_unlock(&adev->gfx.kiq[inst].ring_lock);
830 			goto error_unlock_reset;
831 		}
832 
833 		amdgpu_ring_commit(ring);
834 		spin_unlock(&adev->gfx.kiq[inst].ring_lock);
835 
836 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
837 
838 		might_sleep();
839 		while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
840 		       !amdgpu_reset_pending(adev->reset_domain)) {
841 			msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
842 			r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
843 		}
844 
845 		if (cnt > MAX_KIQ_REG_TRY) {
846 			dev_err(adev->dev, "timeout waiting for kiq fence\n");
847 			r = -ETIME;
848 		} else
849 			r = 0;
850 	}
851 
852 error_unlock_reset:
853 	up_read(&adev->reset_domain->sem);
854 	return r;
855 }
856 
857 void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
858 				      uint32_t reg0, uint32_t reg1,
859 				      uint32_t ref, uint32_t mask,
860 				      uint32_t xcc_inst)
861 {
862 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
863 	struct amdgpu_ring *ring = &kiq->ring;
864 	signed long r, cnt = 0;
865 	unsigned long flags;
866 	uint32_t seq;
867 
868 	if (adev->mes.ring[MES_PIPE_INST(xcc_inst, 0)].sched.ready) {
869 		amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
870 					      ref, mask, xcc_inst);
871 		return;
872 	}
873 
874 	spin_lock_irqsave(&kiq->ring_lock, flags);
875 	amdgpu_ring_alloc(ring, 32);
876 	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
877 					    ref, mask);
878 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
879 	if (r)
880 		goto failed_undo;
881 
882 	amdgpu_ring_commit(ring);
883 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
884 
885 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
886 
887 	/* don't wait anymore for IRQ context */
888 	if (r < 1 && in_interrupt())
889 		goto failed_kiq;
890 
891 	might_sleep();
892 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
893 	       !amdgpu_reset_pending(adev->reset_domain)) {
894 
895 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
896 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
897 	}
898 
899 	if (cnt > MAX_KIQ_REG_TRY)
900 		goto failed_kiq;
901 
902 	return;
903 
904 failed_undo:
905 	amdgpu_ring_undo(ring);
906 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
907 failed_kiq:
908 	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
909 }
910 
911 /**
912  * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
913  * @adev: amdgpu_device pointer
914  *
915  * Check and set if the device @adev supports Trusted Memory
916  * Zones (TMZ).
917  */
918 void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
919 {
920 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
921 	/* RAVEN */
922 	case IP_VERSION(9, 2, 2):
923 	case IP_VERSION(9, 1, 0):
924 	/* RENOIR looks like RAVEN */
925 	case IP_VERSION(9, 3, 0):
926 	/* GC 10.3.7 */
927 	case IP_VERSION(10, 3, 7):
928 	/* GC 11.0.1 */
929 	case IP_VERSION(11, 0, 1):
930 		if (amdgpu_tmz == 0) {
931 			adev->gmc.tmz_enabled = false;
932 			dev_info(adev->dev,
933 				 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
934 		} else {
935 			adev->gmc.tmz_enabled = true;
936 			dev_info(adev->dev,
937 				 "Trusted Memory Zone (TMZ) feature enabled\n");
938 		}
939 		break;
940 	case IP_VERSION(10, 1, 10):
941 	case IP_VERSION(10, 1, 1):
942 	case IP_VERSION(10, 1, 2):
943 	case IP_VERSION(10, 1, 3):
944 	case IP_VERSION(10, 3, 0):
945 	case IP_VERSION(10, 3, 2):
946 	case IP_VERSION(10, 3, 4):
947 	case IP_VERSION(10, 3, 5):
948 	case IP_VERSION(10, 3, 6):
949 	/* VANGOGH */
950 	case IP_VERSION(10, 3, 1):
951 	/* YELLOW_CARP */
952 	case IP_VERSION(10, 3, 3):
953 	case IP_VERSION(11, 0, 4):
954 	case IP_VERSION(11, 5, 0):
955 	case IP_VERSION(11, 5, 1):
956 	case IP_VERSION(11, 5, 2):
957 	case IP_VERSION(11, 5, 3):
958 	case IP_VERSION(11, 5, 4):
959 		/* Don't enable it by default yet.
960 		 */
961 		if (amdgpu_tmz < 1) {
962 			adev->gmc.tmz_enabled = false;
963 			dev_info(adev->dev,
964 				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
965 		} else {
966 			adev->gmc.tmz_enabled = true;
967 			dev_info(adev->dev,
968 				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
969 		}
970 		break;
971 	default:
972 		adev->gmc.tmz_enabled = false;
973 		dev_info(adev->dev,
974 			 "Trusted Memory Zone (TMZ) feature not supported\n");
975 		break;
976 	}
977 }
978 
979 /**
980  * amdgpu_gmc_noretry_set -- set per asic noretry defaults
981  * @adev: amdgpu_device pointer
982  *
983  * Set a per asic default for the no-retry parameter.
984  *
985  */
986 void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
987 {
988 	struct amdgpu_gmc *gmc = &adev->gmc;
989 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
990 	bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
991 				gc_ver == IP_VERSION(9, 4, 0) ||
992 				gc_ver == IP_VERSION(9, 4, 1) ||
993 				gc_ver == IP_VERSION(9, 4, 2) ||
994 				gc_ver == IP_VERSION(9, 4, 3) ||
995 				gc_ver == IP_VERSION(9, 4, 4) ||
996 				gc_ver == IP_VERSION(9, 5, 0) ||
997 				gc_ver >= IP_VERSION(10, 3, 0));
998 
999 	if (!amdgpu_sriov_xnack_support(adev))
1000 		gmc->noretry = 1;
1001 	else
1002 		gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
1003 }
1004 
1005 void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
1006 				   bool enable)
1007 {
1008 	struct amdgpu_vmhub *hub;
1009 	u32 tmp, reg, i;
1010 
1011 	hub = &adev->vmhub[hub_type];
1012 	for (i = 0; i < 16; i++) {
1013 		reg = hub->vm_context0_cntl + hub->ctx_distance * i;
1014 
1015 		tmp = (hub_type == AMDGPU_GFXHUB(0)) ?
1016 			RREG32_SOC15_IP(GC, reg) :
1017 			RREG32_SOC15_IP(MMHUB, reg);
1018 
1019 		if (enable)
1020 			tmp |= hub->vm_cntx_cntl_vm_fault;
1021 		else
1022 			tmp &= ~hub->vm_cntx_cntl_vm_fault;
1023 
1024 		(hub_type == AMDGPU_GFXHUB(0)) ?
1025 			WREG32_SOC15_IP(GC, reg, tmp) :
1026 			WREG32_SOC15_IP(MMHUB, reg, tmp);
1027 	}
1028 }
1029 
1030 void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
1031 {
1032 	unsigned size;
1033 
1034 	/*
1035 	 * Some ASICs need to reserve a region of video memory to avoid access
1036 	 * from the driver
1037 	 */
1038 	adev->mman.stolen_reserved_offset = 0;
1039 	adev->mman.stolen_reserved_size = 0;
1040 
1041 	/*
1042 	 * TODO:
1043 	 * Currently there is a bug where some memory client outside
1044 	 * of the driver writes to the first 8M of VRAM on S3 resume.
1045 	 * This overrides GART, which by default gets placed in the first 8M,
1046 	 * and causes VM_FAULTS once GTT is accessed.
1047 	 * Keep the stolen memory reservation until this is solved.
1048 	 */
1049 	switch (adev->asic_type) {
1050 	case CHIP_VEGA10:
1051 		adev->mman.keep_stolen_vga_memory = true;
1052 		/*
1053 		 * VEGA10 SRIOV VF with MS_HYPERV host needs some firmware reserved area.
1054 		 */
1055 #ifdef CONFIG_X86
1056 		if (amdgpu_sriov_vf(adev) && hypervisor_is_type(X86_HYPER_MS_HYPERV)) {
1057 			adev->mman.stolen_reserved_offset = 0x500000;
1058 			adev->mman.stolen_reserved_size = 0x200000;
1059 		}
1060 #endif
1061 		break;
1062 	case CHIP_RAVEN:
1063 	case CHIP_RENOIR:
1064 		adev->mman.keep_stolen_vga_memory = true;
1065 		break;
1066 	default:
1067 		adev->mman.keep_stolen_vga_memory = false;
1068 		break;
1069 	}
1070 
1071 	if (amdgpu_sriov_vf(adev) ||
1072 	    !amdgpu_device_has_display_hardware(adev)) {
1073 		size = 0;
1074 	} else {
1075 		size = amdgpu_gmc_get_vbios_fb_size(adev);
1076 
1077 		if (adev->mman.keep_stolen_vga_memory)
1078 			size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
1079 	}
1080 
1081 	/* set to 0 if the pre-OS buffer uses up most of vram */
1082 	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
1083 		size = 0;
1084 
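	/* Reserve at most AMDGPU_VBIOS_VGA_ALLOCATION as the stolen VGA region;
	 * anything the pre-OS framebuffer needs beyond that becomes the
	 * extended stolen region.
	 */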
1085 	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
1086 		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
1087 		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
1088 	} else {
1089 		adev->mman.stolen_vga_size = size;
1090 		adev->mman.stolen_extended_size = 0;
1091 	}
1092 }
1093 
1094 /**
1095  * amdgpu_gmc_init_pdb0 - initialize PDB0
1096  *
1097  * @adev: amdgpu_device pointer
1098  *
1099  * This function is only used when GART page table is used
1100  * for FB address translation. In such a case, we construct
1101  * a 2-level system VM page table: PDB0->PTB, to cover both
1102  * VRAM of the hive and system memory.
1103  *
1104  * PDB0 is static, initialized once on driver initialization.
1105  * The first n entries of PDB0 are used as PTE by setting
1106  * P bit to 1, pointing to VRAM. The n+1'th entry points
1107  * to a big PTB covering system memory.
1108  *
1109  */
1110 void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
1111 {
1112 	int i;
1113 	uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW?
1114 	/* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
1115 	 */
1116 	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
1117 	u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
1118 	u64 vram_addr, vram_end;
1119 	u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
1120 	int idx;
1121 
1122 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1123 		return;
1124 
1125 	flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
1126 	flags |= AMDGPU_PTE_WRITEABLE;
1127 	flags |= AMDGPU_PTE_SNOOPED;
1128 	flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
1129 	flags |= AMDGPU_PDE_PTE_FLAG(adev);
1130 
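	/* Rebase from this node's VRAM base to the start of the hive's VRAM
	 * (unless migrate is enabled) so that PDB0 covers all nodes.
	 */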
1131 	vram_addr = adev->vm_manager.vram_base_offset;
1132 	if (!amdgpu_virt_xgmi_migrate_enabled(adev))
1133 		vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1134 	vram_end = vram_addr + vram_size;
1135 
1136 	/* The first n PDE0 entries are used as PTE,
1137 	 * pointing to vram
1138 	 */
1139 	for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
1140 		amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);
1141 
1142 	/* The n+1'th PDE0 entry points to a huge
1143 	 * PTB which has more than 512 entries, each
1144 	 * pointing to a 4K system page
1145 	 */
1146 	flags = AMDGPU_PTE_VALID;
1147 	flags |= AMDGPU_PTE_SNOOPED | AMDGPU_PDE_BFS_FLAG(adev, 0);
1148 	/* Requires gart_ptb_gpu_pa to be 4K aligned */
1149 	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
1150 	drm_dev_exit(idx);
1151 }
1152 
1153 /**
1154  * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
1155  * address
1156  *
1157  * @adev: amdgpu_device pointer
1158  * @mc_addr: MC address of buffer
1159  */
1160 uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
1161 {
1162 	return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
1163 }
1164 
1165 /**
1166  * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
1167  * GPU's view
1168  *
1169  * @adev: amdgpu_device pointer
1170  * @bo: amdgpu buffer object
1171  */
1172 uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
1173 {
1174 	return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
1175 }
1176 
1177 int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
1178 {
1179 	struct amdgpu_bo *vram_bo = NULL;
1180 	uint64_t vram_gpu = 0;
1181 	void *vram_ptr = NULL;
1182 
1183 	int ret, size = 0x100000;
1184 	uint8_t cptr[10];
1185 
1186 	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
1187 				AMDGPU_GEM_DOMAIN_VRAM,
1188 				&vram_bo,
1189 				&vram_gpu,
1190 				&vram_ptr);
1191 	if (ret)
1192 		return ret;
1193 
1194 	memset(vram_ptr, 0x86, size);
1195 	memset(cptr, 0x86, 10);
1196 
1197 	/*
1198 	 * Check the start, the middle, and the end of the memory to see whether
1199 	 * the content of each byte is the pattern "0x86". If yes, we suppose the
1200 	 * vram bo is workable.
1201 	 *
1202 	 * Note: checking each byte of the whole 1M bo would cost too many
1203 	 * seconds, so here we just pick three parts for emulation.
1204 	 */
1205 	ret = memcmp(vram_ptr, cptr, 10);
1206 	if (ret) {
1207 		ret = -EIO;
1208 		goto release_buffer;
1209 	}
1210 
1211 	ret = memcmp(vram_ptr + (size / 2), cptr, 10);
1212 	if (ret) {
1213 		ret = -EIO;
1214 		goto release_buffer;
1215 	}
1216 
1217 	ret = memcmp(vram_ptr + size - 10, cptr, 10);
1218 	if (ret) {
1219 		ret = -EIO;
1220 		goto release_buffer;
1221 	}
1222 
1223 release_buffer:
1224 	amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
1225 			&vram_ptr);
1226 
1227 	return ret;
1228 }
1229 
1230 static const char *nps_desc[] = {
1231 	[AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
1232 	[AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
1233 	[AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
1234 	[AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
1235 	[AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
1236 	[AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
1237 };
1238 
1239 static ssize_t available_memory_partition_show(struct device *dev,
1240 					       struct device_attribute *addr,
1241 					       char *buf)
1242 {
1243 	struct drm_device *ddev = dev_get_drvdata(dev);
1244 	struct amdgpu_device *adev = drm_to_adev(ddev);
1245 	int size = 0, mode;
1246 	char *sep = "";
1247 
1248 	for_each_inst(mode, adev->gmc.supported_nps_modes) {
1249 		size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
1250 		sep = ", ";
1251 	}
1252 	size += sysfs_emit_at(buf, size, "\n");
1253 
1254 	return size;
1255 }
1256 
1257 static ssize_t current_memory_partition_store(struct device *dev,
1258 					      struct device_attribute *attr,
1259 					      const char *buf, size_t count)
1260 {
1261 	struct drm_device *ddev = dev_get_drvdata(dev);
1262 	struct amdgpu_device *adev = drm_to_adev(ddev);
1263 	enum amdgpu_memory_partition mode;
1264 	struct amdgpu_hive_info *hive;
1265 	int i;
1266 
1267 	mode = UNKNOWN_MEMORY_PARTITION_MODE;
1268 	for_each_inst(i, adev->gmc.supported_nps_modes) {
1269 		if (!strncasecmp(nps_desc[i], buf, strlen(nps_desc[i]))) {
1270 			mode = i;
1271 			break;
1272 		}
1273 	}
1274 
1275 	if (mode == UNKNOWN_MEMORY_PARTITION_MODE)
1276 		return -EINVAL;
1277 
1278 	if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) {
1279 		dev_info(
1280 			adev->dev,
1281 			"requested NPS mode is same as current NPS mode, skipping\n");
1282 		return count;
1283 	}
1284 
1285 	/* If device is part of hive, all devices in the hive should request the
1286 	 * same mode. Hence store the requested mode in hive.
1287 	 */
1288 	hive = amdgpu_get_xgmi_hive(adev);
1289 	if (hive) {
1290 		atomic_set(&hive->requested_nps_mode, mode);
1291 		amdgpu_put_xgmi_hive(hive);
1292 	} else {
1293 		adev->gmc.requested_nps_mode = mode;
1294 	}
1295 
1296 	dev_info(
1297 		adev->dev,
1298 		"NPS mode change requested, please remove and reload the driver\n");
1299 
1300 	return count;
1301 }
1302 
1303 static ssize_t current_memory_partition_show(
1304 	struct device *dev, struct device_attribute *addr, char *buf)
1305 {
1306 	struct drm_device *ddev = dev_get_drvdata(dev);
1307 	struct amdgpu_device *adev = drm_to_adev(ddev);
1308 	enum amdgpu_memory_partition mode;
1309 
1310 	/* Only minimal precaution taken to reject requests while in reset */
1311 	if (amdgpu_in_reset(adev))
1312 		return -EPERM;
1313 
1314 	mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1315 	if ((mode >= ARRAY_SIZE(nps_desc)) ||
1316 	    (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
1317 		return sysfs_emit(buf, "UNKNOWN\n");
1318 
1319 	return sysfs_emit(buf, "%s\n", nps_desc[mode]);
1320 }
1321 
1322 static DEVICE_ATTR_RW(current_memory_partition);
1323 static DEVICE_ATTR_RO(available_memory_partition);
1324 
1325 int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev)
1326 {
1327 	bool nps_switch_support;
1328 	int r = 0;
1329 
1330 	if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
1331 		return 0;
1332 
1333 	nps_switch_support = (hweight32(adev->gmc.supported_nps_modes &
1334 					AMDGPU_ALL_NPS_MASK) > 1);
1335 	if (!nps_switch_support)
1336 		dev_attr_current_memory_partition.attr.mode &=
1337 			~(S_IWUSR | S_IWGRP | S_IWOTH);
1338 	else
1339 		r = device_create_file(adev->dev,
1340 				       &dev_attr_available_memory_partition);
1341 
1342 	if (r)
1343 		return r;
1344 
1345 	return device_create_file(adev->dev,
1346 				  &dev_attr_current_memory_partition);
1347 }
1348 
1349 void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev)
1350 {
1351 	if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
1352 		return;
1353 
1354 	device_remove_file(adev->dev, &dev_attr_current_memory_partition);
1355 	device_remove_file(adev->dev, &dev_attr_available_memory_partition);
1356 }
1357 
1358 int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
1359 				 struct amdgpu_mem_partition_info *mem_ranges,
1360 				 uint8_t *exp_ranges)
1361 {
1362 	struct amdgpu_gmc_memrange *ranges;
1363 	int range_cnt, ret, i, j;
1364 	uint32_t nps_type;
1365 	bool refresh;
1366 
1367 	if (!mem_ranges || !exp_ranges)
1368 		return -EINVAL;
1369 
1370 	refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
1371 		  (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS);
1372 	ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges,
1373 					    &range_cnt, refresh);
1374 
1375 	if (ret)
1376 		return ret;
1377 
1378 	/* TODO: For now, expect ranges and partition count to be the same.
1379 	 * Adjust if there are holes expected in any NPS domain.
1380 	 */
1381 	if (*exp_ranges && (range_cnt != *exp_ranges)) {
1382 		dev_warn(
1383 			adev->dev,
1384 			"NPS config mismatch - expected ranges: %d discovery - nps mode: %d, nps ranges: %d",
1385 			*exp_ranges, nps_type, range_cnt);
1386 		ret = -EINVAL;
1387 		goto err;
1388 	}
1389 
1390 	for (i = 0; i < range_cnt; ++i) {
1391 		if (ranges[i].base_address >= ranges[i].limit_address) {
1392 			dev_warn(
1393 				adev->dev,
1394 				"Invalid NPS range - nps mode: %d, range[%d]: base: %llx limit: %llx",
1395 				nps_type, i, ranges[i].base_address,
1396 				ranges[i].limit_address);
1397 			ret = -EINVAL;
1398 			goto err;
1399 		}
1400 
1401 		/* Check for overlaps, not expecting any now */
1402 		for (j = i - 1; j >= 0; j--) {
1403 			if (max(ranges[j].base_address,
1404 				ranges[i].base_address) <=
1405 			    min(ranges[j].limit_address,
1406 				ranges[i].limit_address)) {
1407 				dev_warn(
1408 					adev->dev,
1409 					"overlapping ranges detected [ %llx - %llx ] | [%llx - %llx]",
1410 					ranges[j].base_address,
1411 					ranges[j].limit_address,
1412 					ranges[i].base_address,
1413 					ranges[i].limit_address);
1414 				ret = -EINVAL;
1415 				goto err;
1416 			}
1417 		}
1418 
1419 		mem_ranges[i].range.fpfn =
1420 			(ranges[i].base_address -
1421 			 adev->vm_manager.vram_base_offset) >>
1422 			AMDGPU_GPU_PAGE_SHIFT;
1423 		mem_ranges[i].range.lpfn =
1424 			(ranges[i].limit_address -
1425 			 adev->vm_manager.vram_base_offset) >>
1426 			AMDGPU_GPU_PAGE_SHIFT;
1427 		mem_ranges[i].size =
1428 			ranges[i].limit_address - ranges[i].base_address + 1;
1429 	}
1430 
1431 	if (!*exp_ranges)
1432 		*exp_ranges = range_cnt;
1433 err:
1434 	kfree(ranges);
1435 
1436 	return ret;
1437 }
1438 
1439 int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
1440 					int nps_mode)
1441 {
1442 	/* Not supported on VF devices and APUs */
1443 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
1444 		return -EOPNOTSUPP;
1445 
1446 	if (!adev->psp.funcs) {
1447 		dev_err(adev->dev,
1448 			"PSP interface not available for nps mode change request");
1449 		return -EINVAL;
1450 	}
1451 
1452 	return psp_memory_partition(&adev->psp, nps_mode);
1453 }
1454 
1455 static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev,
1456 						  int req_nps_mode,
1457 						  int cur_nps_mode)
1458 {
1459 	return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) ==
1460 			BIT(req_nps_mode)) &&
1461 		req_nps_mode != cur_nps_mode);
1462 }
1463 
1464 void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev)
1465 {
1466 	int req_nps_mode, cur_nps_mode, r;
1467 	struct amdgpu_hive_info *hive;
1468 
1469 	if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes ||
1470 	    !adev->gmc.gmc_funcs->request_mem_partition_mode)
1471 		return;
1472 
1473 	cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1474 	hive = amdgpu_get_xgmi_hive(adev);
1475 	if (hive) {
1476 		req_nps_mode = atomic_read(&hive->requested_nps_mode);
1477 		if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode,
1478 						    cur_nps_mode)) {
1479 			amdgpu_put_xgmi_hive(hive);
1480 			return;
1481 		}
1482 		r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode);
1483 		amdgpu_put_xgmi_hive(hive);
1484 		goto out;
1485 	}
1486 
1487 	req_nps_mode = adev->gmc.requested_nps_mode;
1488 	if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode))
1489 		return;
1490 
1491 	/* even if this fails, we should let driver unload w/o blocking */
1492 	r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode);
1493 out:
1494 	if (r)
1495 		dev_err(adev->dev, "NPS mode change request failed\n");
1496 	else
1497 		dev_info(
1498 			adev->dev,
1499 			"NPS mode change request done, reload driver to complete the change\n");
1500 }
1501 
1502 bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
1503 {
1504 	if (adev->gmc.gmc_funcs->need_reset_on_init)
1505 		return adev->gmc.gmc_funcs->need_reset_on_init(adev);
1506 
1507 	return false;
1508 }
1509 
1510 enum amdgpu_memory_partition
1511 amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev)
1512 {
1513 	switch (adev->gmc.num_mem_partitions) {
1514 	case 0:
1515 		return UNKNOWN_MEMORY_PARTITION_MODE;
1516 	case 1:
1517 		return AMDGPU_NPS1_PARTITION_MODE;
1518 	case 2:
1519 		return AMDGPU_NPS2_PARTITION_MODE;
1520 	case 4:
1521 		return AMDGPU_NPS4_PARTITION_MODE;
1522 	case 8:
1523 		return AMDGPU_NPS8_PARTITION_MODE;
1524 	default:
1525 		return AMDGPU_NPS1_PARTITION_MODE;
1526 	}
1527 }
1528 
1529 enum amdgpu_memory_partition
1530 amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
1531 {
1532 	enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
1533 
1534 	if (adev->nbio.funcs &&
1535 	    adev->nbio.funcs->get_memory_partition_mode)
1536 		mode = adev->nbio.funcs->get_memory_partition_mode(adev,
1537 								   supp_modes);
1538 	else
1539 		dev_warn(adev->dev, "memory partition mode query is not supported\n");
1540 
1541 	return mode;
1542 }
1543 
1544 enum amdgpu_memory_partition
1545 amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev)
1546 {
1547 	if (amdgpu_sriov_vf(adev))
1548 		return amdgpu_gmc_get_vf_memory_partition(adev);
1549 	else
1550 		return amdgpu_gmc_get_memory_partition(adev, NULL);
1551 }
1552 
1553 static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev)
1554 {
1555 	enum amdgpu_memory_partition mode;
1556 	u32 supp_modes;
1557 	bool valid;
1558 
1559 	mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
1560 
1561 	/* Mode detected by hardware not present in supported modes */
1562 	if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
1563 	    !(BIT(mode - 1) & supp_modes))
1564 		return false;
1565 
1566 	switch (mode) {
1567 	case UNKNOWN_MEMORY_PARTITION_MODE:
1568 	case AMDGPU_NPS1_PARTITION_MODE:
1569 		valid = (adev->gmc.num_mem_partitions == 1);
1570 		break;
1571 	case AMDGPU_NPS2_PARTITION_MODE:
1572 		valid = (adev->gmc.num_mem_partitions == 2);
1573 		break;
1574 	case AMDGPU_NPS4_PARTITION_MODE:
1575 		valid = (adev->gmc.num_mem_partitions == 3 ||
1576 			 adev->gmc.num_mem_partitions == 4);
1577 		break;
1578 	case AMDGPU_NPS8_PARTITION_MODE:
1579 		valid = (adev->gmc.num_mem_partitions == 8);
1580 		break;
1581 	default:
1582 		valid = false;
1583 	}
1584 
1585 	return valid;
1586 }
1587 
1588 static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid)
1589 {
1590 	int i;
1591 
1592 	/* Check if node with id 'nid' is present in 'node_ids' array */
1593 	for (i = 0; i < num_ids; ++i)
1594 		if (node_ids[i] == nid)
1595 			return true;
1596 
1597 	return false;
1598 }
1599 
1600 static void
1601 amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev,
1602 				struct amdgpu_mem_partition_info *mem_ranges)
1603 {
1604 	struct amdgpu_numa_info numa_info;
1605 	int node_ids[AMDGPU_MAX_MEM_RANGES];
1606 	int num_ranges = 0, ret;
1607 	int num_xcc, xcc_id;
1608 	uint32_t xcc_mask;
1609 
1610 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1611 	xcc_mask = (1U << num_xcc) - 1;
1612 
1613 	for_each_inst(xcc_id, xcc_mask)	{
1614 		ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
1615 		if (ret)
1616 			continue;
1617 
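		/* No NUMA info for this XCC: treat all reported memory as a
		 * single range and stop probing further XCCs.
		 */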
1618 		if (numa_info.nid == NUMA_NO_NODE) {
1619 			mem_ranges[0].size = numa_info.size;
1620 			mem_ranges[0].numa.node = numa_info.nid;
1621 			num_ranges = 1;
1622 			break;
1623 		}
1624 
1625 		if (amdgpu_gmc_is_node_present(node_ids, num_ranges,
1626 					     numa_info.nid))
1627 			continue;
1628 
1629 		node_ids[num_ranges] = numa_info.nid;
1630 		mem_ranges[num_ranges].numa.node = numa_info.nid;
1631 		mem_ranges[num_ranges].size = numa_info.size;
1632 		++num_ranges;
1633 	}
1634 
1635 	adev->gmc.num_mem_partitions = num_ranges;
1636 }
1637 
1638 void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
1639 				   struct amdgpu_mem_partition_info *mem_ranges)
1640 {
1641 	enum amdgpu_memory_partition mode;
1642 	u32 start_addr = 0, size;
1643 	int i, r, l;
1644 
1645 	mode = amdgpu_gmc_query_memory_partition(adev);
1646 
1647 	switch (mode) {
1648 	case UNKNOWN_MEMORY_PARTITION_MODE:
1649 		adev->gmc.num_mem_partitions = 0;
1650 		break;
1651 	case AMDGPU_NPS1_PARTITION_MODE:
1652 		adev->gmc.num_mem_partitions = 1;
1653 		break;
1654 	case AMDGPU_NPS2_PARTITION_MODE:
1655 		adev->gmc.num_mem_partitions = 2;
1656 		break;
1657 	case AMDGPU_NPS4_PARTITION_MODE:
1658 		if (adev->flags & AMD_IS_APU)
1659 			adev->gmc.num_mem_partitions = 3;
1660 		else
1661 			adev->gmc.num_mem_partitions = 4;
1662 		break;
1663 	case AMDGPU_NPS8_PARTITION_MODE:
1664 		adev->gmc.num_mem_partitions = 8;
1665 		break;
1666 	default:
1667 		adev->gmc.num_mem_partitions = 1;
1668 		break;
1669 	}
1670 
1671 	/* Use NPS range info, if populated */
1672 	r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
1673 					 &adev->gmc.num_mem_partitions);
1674 	if (!r) {
1675 		l = 0;
1676 		for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
1677 			if (mem_ranges[i].range.lpfn >
1678 			    mem_ranges[i - 1].range.lpfn)
1679 				l = i;
1680 		}
1681 
1682 	} else {
1683 		if (!adev->gmc.num_mem_partitions) {
1684 			dev_warn(adev->dev,
1685 				 "Not able to detect NPS mode, fall back to NPS1\n");
1686 			adev->gmc.num_mem_partitions = 1;
1687 		}
1688 		/* Fallback to sw based calculation */
1689 		size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
1690 		size /= adev->gmc.num_mem_partitions;
1691 
1692 		for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
1693 			mem_ranges[i].range.fpfn = start_addr;
1694 			mem_ranges[i].size =
1695 				((u64)size << AMDGPU_GPU_PAGE_SHIFT);
1696 			mem_ranges[i].range.lpfn = start_addr + size - 1;
1697 			start_addr += size;
1698 		}
1699 
1700 		l = adev->gmc.num_mem_partitions - 1;
1701 	}
1702 
1703 	/* Adjust the last one */
1704 	mem_ranges[l].range.lpfn =
1705 		(adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
1706 	mem_ranges[l].size =
1707 		adev->gmc.real_vram_size -
1708 		((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
1709 }
1710 
1711 int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
1712 {
1713 	bool valid;
1714 
1715 	adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES,
1716 					   sizeof(struct amdgpu_mem_partition_info),
1717 					   GFP_KERNEL);
1718 	if (!adev->gmc.mem_partitions)
1719 		return -ENOMEM;
1720 
1721 	if (adev->gmc.is_app_apu)
1722 		amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
1723 	else
1724 		amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
1725 
1726 	if (amdgpu_sriov_vf(adev))
1727 		valid = true;
1728 	else
1729 		valid = amdgpu_gmc_validate_partition_info(adev);
1730 	if (!valid) {
1731 		/* TODO: handle invalid case */
1732 		dev_warn(adev->dev,
1733 			 "Mem ranges not matching with hardware config\n");
1734 	}
1735 
1736 	return 0;
1737 }
1738