xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu.h (revision e332935a540eb76dd656663ca908eb0544d96757)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #ifndef __AMDGPU_H__
29 #define __AMDGPU_H__
30 
31 #ifdef pr_fmt
32 #undef pr_fmt
33 #endif
34 
35 #define pr_fmt(fmt) "amdgpu: " fmt
36 
37 #ifdef dev_fmt
38 #undef dev_fmt
39 #endif
40 
41 #define dev_fmt(fmt) "amdgpu: " fmt
42 
43 #include "amdgpu_ctx.h"
44 
45 #include <linux/atomic.h>
46 #include <linux/wait.h>
47 #include <linux/list.h>
48 #include <linux/kref.h>
49 #include <linux/rbtree.h>
50 #include <linux/hashtable.h>
51 #include <linux/dma-fence.h>
52 #include <linux/pci.h>
53 
54 #include <drm/ttm/ttm_bo.h>
55 #include <drm/ttm/ttm_placement.h>
56 
57 #include <drm/amdgpu_drm.h>
58 #include <drm/drm_gem.h>
59 #include <drm/drm_ioctl.h>
60 
61 #include <kgd_kfd_interface.h>
62 #include "dm_pp_interface.h"
63 #include "kgd_pp_interface.h"
64 
65 #include "amd_shared.h"
66 #include "amdgpu_mode.h"
67 #include "amdgpu_ih.h"
68 #include "amdgpu_irq.h"
69 #include "amdgpu_ucode.h"
70 #include "amdgpu_ttm.h"
71 #include "amdgpu_psp.h"
72 #include "amdgpu_gds.h"
73 #include "amdgpu_sync.h"
74 #include "amdgpu_ring.h"
75 #include "amdgpu_vm.h"
76 #include "amdgpu_dpm.h"
77 #include "amdgpu_acp.h"
78 #include "amdgpu_uvd.h"
79 #include "amdgpu_vce.h"
80 #include "amdgpu_vcn.h"
81 #include "amdgpu_jpeg.h"
82 #include "amdgpu_vpe.h"
83 #include "amdgpu_umsch_mm.h"
84 #include "amdgpu_gmc.h"
85 #include "amdgpu_gfx.h"
86 #include "amdgpu_sdma.h"
87 #include "amdgpu_lsdma.h"
88 #include "amdgpu_nbio.h"
89 #include "amdgpu_hdp.h"
90 #include "amdgpu_dm.h"
91 #include "amdgpu_virt.h"
92 #include "amdgpu_csa.h"
93 #include "amdgpu_mes_ctx.h"
94 #include "amdgpu_gart.h"
95 #include "amdgpu_debugfs.h"
96 #include "amdgpu_job.h"
97 #include "amdgpu_bo_list.h"
98 #include "amdgpu_gem.h"
99 #include "amdgpu_doorbell.h"
100 #include "amdgpu_amdkfd.h"
101 #include "amdgpu_discovery.h"
102 #include "amdgpu_mes.h"
103 #include "amdgpu_umc.h"
104 #include "amdgpu_mmhub.h"
105 #include "amdgpu_gfxhub.h"
106 #include "amdgpu_df.h"
107 #include "amdgpu_smuio.h"
108 #include "amdgpu_fdinfo.h"
109 #include "amdgpu_mca.h"
110 #include "amdgpu_aca.h"
111 #include "amdgpu_ras.h"
112 #include "amdgpu_cper.h"
113 #include "amdgpu_xcp.h"
114 #include "amdgpu_seq64.h"
115 #include "amdgpu_reg_state.h"
116 #include "amdgpu_userq.h"
117 #include "amdgpu_eviction_fence.h"
118 #if defined(CONFIG_DRM_AMD_ISP)
119 #include "amdgpu_isp.h"
120 #endif
121 
122 #define MAX_GPU_INSTANCE		64
123 
124 #define GFX_SLICE_PERIOD_MS		250
125 
126 struct amdgpu_gpu_instance {
127 	struct amdgpu_device		*adev;
128 	int				mgpu_fan_enabled;
129 };
130 
131 struct amdgpu_mgpu_info {
132 	struct amdgpu_gpu_instance	gpu_ins[MAX_GPU_INSTANCE];
133 	struct mutex			mutex;
134 	uint32_t			num_gpu;
135 	uint32_t			num_dgpu;
136 	uint32_t			num_apu;
137 };
138 
139 enum amdgpu_ss {
140 	AMDGPU_SS_DRV_LOAD,
141 	AMDGPU_SS_DEV_D0,
142 	AMDGPU_SS_DEV_D3,
143 	AMDGPU_SS_DRV_UNLOAD
144 };
145 
146 struct amdgpu_hwip_reg_entry {
147 	u32		hwip;
148 	u32		inst;
149 	u32		seg;
150 	u32		reg_offset;
151 	const char	*reg_name;
152 };
153 
154 struct amdgpu_watchdog_timer {
155 	bool timeout_fatal_disable;
156 	uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
157 };
158 
159 #define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH	256
160 
161 /*
162  * Module parameters.
163  */
164 extern int amdgpu_modeset;
165 extern unsigned int amdgpu_vram_limit;
166 extern int amdgpu_vis_vram_limit;
167 extern int amdgpu_gart_size;
168 extern int amdgpu_gtt_size;
169 extern int amdgpu_moverate;
170 extern int amdgpu_audio;
171 extern int amdgpu_disp_priority;
172 extern int amdgpu_hw_i2c;
173 extern int amdgpu_pcie_gen2;
174 extern int amdgpu_msi;
175 extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
176 extern int amdgpu_dpm;
177 extern int amdgpu_fw_load_type;
178 extern int amdgpu_aspm;
179 extern int amdgpu_runtime_pm;
180 extern uint amdgpu_ip_block_mask;
181 extern int amdgpu_bapm;
182 extern int amdgpu_deep_color;
183 extern int amdgpu_vm_size;
184 extern int amdgpu_vm_block_size;
185 extern int amdgpu_vm_fragment_size;
186 extern int amdgpu_vm_fault_stop;
187 extern int amdgpu_vm_debug;
188 extern int amdgpu_vm_update_mode;
189 extern int amdgpu_exp_hw_support;
190 extern int amdgpu_dc;
191 extern int amdgpu_sched_jobs;
192 extern int amdgpu_sched_hw_submission;
193 extern uint amdgpu_pcie_gen_cap;
194 extern uint amdgpu_pcie_lane_cap;
195 extern u64 amdgpu_cg_mask;
196 extern uint amdgpu_pg_mask;
197 extern uint amdgpu_sdma_phase_quantum;
198 extern char *amdgpu_disable_cu;
199 extern char *amdgpu_virtual_display;
200 extern uint amdgpu_pp_feature_mask;
201 extern uint amdgpu_force_long_training;
202 extern int amdgpu_lbpw;
203 extern int amdgpu_compute_multipipe;
204 extern int amdgpu_gpu_recovery;
205 extern int amdgpu_emu_mode;
206 extern uint amdgpu_smu_memory_pool_size;
207 extern int amdgpu_smu_pptable_id;
208 extern uint amdgpu_dc_feature_mask;
209 extern uint amdgpu_freesync_vid_mode;
210 extern uint amdgpu_dc_debug_mask;
211 extern uint amdgpu_dc_visual_confirm;
212 extern int amdgpu_dm_abm_level;
213 extern int amdgpu_backlight;
214 extern int amdgpu_damage_clips;
215 extern struct amdgpu_mgpu_info mgpu_info;
216 extern int amdgpu_ras_enable;
217 extern uint amdgpu_ras_mask;
218 extern int amdgpu_bad_page_threshold;
219 extern bool amdgpu_ignore_bad_page_threshold;
220 extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
221 extern int amdgpu_async_gfx_ring;
222 extern int amdgpu_mcbp;
223 extern int amdgpu_discovery;
224 extern int amdgpu_mes;
225 extern int amdgpu_mes_log_enable;
226 extern int amdgpu_mes_kiq;
227 extern int amdgpu_uni_mes;
228 extern int amdgpu_noretry;
229 extern int amdgpu_force_asic_type;
230 extern int amdgpu_smartshift_bias;
231 extern int amdgpu_use_xgmi_p2p;
232 extern int amdgpu_mtype_local;
233 extern int amdgpu_enforce_isolation;
234 #ifdef CONFIG_HSA_AMD
235 extern int sched_policy;
236 extern bool debug_evictions;
237 extern bool no_system_mem_limit;
238 extern int halt_if_hws_hang;
239 extern uint amdgpu_svm_default_granularity;
240 #else
241 static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
242 static const bool __maybe_unused debug_evictions; /* = false */
243 static const bool __maybe_unused no_system_mem_limit;
244 static const int __maybe_unused halt_if_hws_hang;
245 #endif
246 #ifdef CONFIG_HSA_AMD_P2P
247 extern bool pcie_p2p;
248 #endif
249 
250 extern int amdgpu_tmz;
251 extern int amdgpu_reset_method;
252 
253 #ifdef CONFIG_DRM_AMDGPU_SI
254 extern int amdgpu_si_support;
255 #endif
256 #ifdef CONFIG_DRM_AMDGPU_CIK
257 extern int amdgpu_cik_support;
258 #endif
259 extern int amdgpu_num_kcq;
260 
261 #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
262 #define AMDGPU_UMSCHFW_LOG_SIZE (32 * 1024)
263 extern int amdgpu_vcnfw_log;
264 extern int amdgpu_sg_display;
265 extern int amdgpu_umsch_mm;
266 extern int amdgpu_seamless;
267 extern int amdgpu_umsch_mm_fwlog;
268 
269 extern int amdgpu_user_partt_mode;
270 extern int amdgpu_agp;
271 extern int amdgpu_rebar;
272 
273 extern int amdgpu_wbrf;
274 extern int amdgpu_user_queue;
275 
276 #define AMDGPU_VM_MAX_NUM_CTX			4096
277 #define AMDGPU_SG_THRESHOLD			(256*1024*1024)
278 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
279 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
280 #define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
281 #define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
282 #define AMDGPUFB_CONN_LIMIT			4
283 #define AMDGPU_BIOS_NUM_SCRATCH			16
284 
285 #define AMDGPU_VBIOS_VGA_ALLOCATION		(9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */
286 
287 /* hard reset data */
288 #define AMDGPU_ASIC_RESET_DATA                  0x39d5e86b
289 
290 /* reset flags */
291 #define AMDGPU_RESET_GFX			(1 << 0)
292 #define AMDGPU_RESET_COMPUTE			(1 << 1)
293 #define AMDGPU_RESET_DMA			(1 << 2)
294 #define AMDGPU_RESET_CP				(1 << 3)
295 #define AMDGPU_RESET_GRBM			(1 << 4)
296 #define AMDGPU_RESET_DMA1			(1 << 5)
297 #define AMDGPU_RESET_RLC			(1 << 6)
298 #define AMDGPU_RESET_SEM			(1 << 7)
299 #define AMDGPU_RESET_IH				(1 << 8)
300 #define AMDGPU_RESET_VMC			(1 << 9)
301 #define AMDGPU_RESET_MC				(1 << 10)
302 #define AMDGPU_RESET_DISPLAY			(1 << 11)
303 #define AMDGPU_RESET_UVD			(1 << 12)
304 #define AMDGPU_RESET_VCE			(1 << 13)
305 #define AMDGPU_RESET_VCE1			(1 << 14)
306 
307 /* reset mask */
308 #define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
309 #define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */
310 #define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */
311 #define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */
312 
313 /* max cursor sizes (in pixels) */
314 #define CIK_CURSOR_WIDTH 128
315 #define CIK_CURSOR_HEIGHT 128
316 
317 /* smart shift bias level limits */
318 #define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
319 #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
320 
321 /* Extra time delay (in ms) to eliminate the influence of momentary temperature fluctuations */
322 #define AMDGPU_SWCTF_EXTRA_DELAY		50
323 
324 struct amdgpu_xcp_mgr;
325 struct amdgpu_device;
326 struct amdgpu_irq_src;
327 struct amdgpu_fpriv;
328 struct amdgpu_bo_va_mapping;
329 struct kfd_vm_fault_info;
330 struct amdgpu_hive_info;
331 struct amdgpu_reset_context;
332 struct amdgpu_reset_control;
333 
334 enum amdgpu_cp_irq {
335 	AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
336 	AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
337 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
338 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
339 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
340 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
341 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
342 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
343 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
344 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
345 
346 	AMDGPU_CP_IRQ_LAST
347 };
348 
349 enum amdgpu_thermal_irq {
350 	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
351 	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
352 
353 	AMDGPU_THERMAL_IRQ_LAST
354 };
355 
356 enum amdgpu_kiq_irq {
357 	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
358 	AMDGPU_CP_KIQ_IRQ_LAST
359 };
360 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
361 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
362 #define MAX_KIQ_REG_TRY 1000
363 
364 int amdgpu_device_ip_set_clockgating_state(void *dev,
365 					   enum amd_ip_block_type block_type,
366 					   enum amd_clockgating_state state);
367 int amdgpu_device_ip_set_powergating_state(void *dev,
368 					   enum amd_ip_block_type block_type,
369 					   enum amd_powergating_state state);
370 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
371 					    u64 *flags);
372 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
373 				   enum amd_ip_block_type block_type);
374 bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
375 			      enum amd_ip_block_type block_type);
376 int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
377 
378 int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
379 
380 #define AMDGPU_MAX_IP_NUM 16
381 
382 struct amdgpu_ip_block_status {
383 	bool valid;
384 	bool sw;
385 	bool hw;
386 	bool late_initialized;
387 	bool hang;
388 };
389 
390 struct amdgpu_ip_block_version {
391 	const enum amd_ip_block_type type;
392 	const u32 major;
393 	const u32 minor;
394 	const u32 rev;
395 	const struct amd_ip_funcs *funcs;
396 };
397 
398 struct amdgpu_ip_block {
399 	struct amdgpu_ip_block_status status;
400 	const struct amdgpu_ip_block_version *version;
401 	struct amdgpu_device *adev;
402 };
403 
404 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
405 				       enum amd_ip_block_type type,
406 				       u32 major, u32 minor);
407 
408 struct amdgpu_ip_block *
409 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
410 			      enum amd_ip_block_type type);
411 
412 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
413 			       const struct amdgpu_ip_block_version *ip_block_version);
414 
415 /*
416  * BIOS.
417  */
418 bool amdgpu_get_bios(struct amdgpu_device *adev);
419 bool amdgpu_read_bios(struct amdgpu_device *adev);
420 bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
421 				     u8 *bios, u32 length_bytes);
422 void amdgpu_bios_release(struct amdgpu_device *adev);
423 /*
424  * Clocks
425  */
426 
427 #define AMDGPU_MAX_PPLL 3
428 
429 struct amdgpu_clock {
430 	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
431 	struct amdgpu_pll spll;
432 	struct amdgpu_pll mpll;
433 	/* 10 kHz units */
434 	uint32_t default_mclk;
435 	uint32_t default_sclk;
436 	uint32_t default_dispclk;
437 	uint32_t current_dispclk;
438 	uint32_t dp_extclk;
439 	uint32_t max_pixel_clock;
440 };
441 
442 /* sub-allocation manager; it has to be protected by another lock.
443  * By design this is a helper for other parts of the driver,
444  * like the indirect buffer or semaphore, which both have their own
445  * locking.
446  *
447  * The principle is simple: we keep a list of sub-allocations in offset
448  * order (the first entry has offset == 0, the last entry has the highest
449  * offset).
450  *
451  * When allocating a new object we first check if there is room at
452  * the end, i.e. total_size - (last_object_offset + last_object_size) >=
453  * alloc_size. If so, we allocate the new object there.
454  *
455  * When there is not enough room at the end, we start waiting for
456  * each sub object until we reach object_offset + object_size >=
457  * alloc_size; this object then becomes the sub object we return.
458  *
459  * Alignment can't be bigger than the page size.
460  *
461  * Holes are not considered for allocation to keep things simple.
462  * The assumption is that there won't be holes (all objects use the same
463  * alignment).
464  */
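/*
 * A quick worked example of the end-of-buffer check described above (the
 * numbers are purely illustrative): with a 64 KiB manager whose last
 * sub-allocation starts at 56 KiB and is 4 KiB long, a new 4 KiB request
 * still fits at the end because 65536 - (57344 + 4096) >= 4096, while an
 * 8 KiB request would have to wait for earlier sub objects to be freed.
 */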
465 
466 struct amdgpu_sa_manager {
467 	struct drm_suballoc_manager	base;
468 	struct amdgpu_bo		*bo;
469 	uint64_t			gpu_addr;
470 	void				*cpu_ptr;
471 };
472 
473 int amdgpu_fence_slab_init(void);
474 void amdgpu_fence_slab_fini(void);
475 
476 /*
477  * IRQS.
478  */
479 
480 struct amdgpu_flip_work {
481 	struct delayed_work		flip_work;
482 	struct work_struct		unpin_work;
483 	struct amdgpu_device		*adev;
484 	int				crtc_id;
485 	u32				target_vblank;
486 	uint64_t			base;
487 	struct drm_pending_vblank_event *event;
488 	struct amdgpu_bo		*old_abo;
489 	unsigned			shared_count;
490 	struct dma_fence		**shared;
491 	struct dma_fence_cb		cb;
492 	bool				async;
493 };
494 
495 /*
496  * file private structure
497  */
498 
499 struct amdgpu_fpriv {
500 	struct amdgpu_vm	vm;
501 	struct amdgpu_bo_va	*prt_va;
502 	struct amdgpu_bo_va	*csa_va;
503 	struct amdgpu_bo_va	*seq64_va;
504 	struct mutex		bo_list_lock;
505 	struct idr		bo_list_handles;
506 	struct amdgpu_ctx_mgr	ctx_mgr;
507 	struct amdgpu_userq_mgr	userq_mgr;
508 
509 	/* Eviction fence infra */
510 	struct amdgpu_eviction_fence_mgr evf_mgr;
511 
512 	/** GPU partition selection */
513 	uint32_t		xcp_id;
514 };
515 
516 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
517 
518 /*
519  * Writeback
520  */
521 #define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */
522 
523 /**
524  * amdgpu_wb - This struct is used for small GPU memory allocation.
525  *
526  * This struct is used to allocate a small amount of GPU memory that can be
527  * used to shadow certain states into the memory. This is especially useful for
528  * providing easy CPU access to some states without requiring register access
529  * (e.g., if some block is power gated, reading register may be problematic).
530  *
531  * Note: the term writeback was initially used because many of the amdgpu
532  * components had some level of writeback memory, and this struct initially
533  * described those components.
534  */
535 struct amdgpu_wb {
536 
537 	/**
538 	 * @wb_obj:
539 	 *
540 	 * Buffer Object used for the writeback memory.
541 	 */
542 	struct amdgpu_bo	*wb_obj;
543 
544 	/**
545 	 * @wb:
546 	 *
547 	 * Pointer to the first writeback slot. In terms of CPU address
548 	 * this value can be accessed directly by using the offset as an index.
549 	 * For the GPU address, it is necessary to use gpu_addr and the offset.
550 	 */
551 	volatile uint32_t	*wb;
552 
553 	/**
554 	 * @gpu_addr:
555 	 *
556 	 * Writeback base address in the GPU.
557 	 */
558 	uint64_t		gpu_addr;
559 
560 	/**
561 	 * @num_wb:
562 	 *
563 	 * Number of writeback slots reserved for amdgpu.
564 	 */
565 	u32			num_wb;
566 
567 	/**
568 	 * @used:
569 	 *
570 	 * Track the writeback slots already in use.
571 	 */
572 	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
573 
574 	/**
575 	 * @lock:
576 	 *
577 	 * Protects read and write of the used field array.
578 	 */
579 	spinlock_t		lock;
580 };
581 
582 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
583 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
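/*
 * Illustrative use of the two helpers above; error handling and the ring
 * setup around it are omitted, and "offs"/"wb_gpu_addr" are placeholder
 * names rather than real driver fields:
 *
 *	u32 offs;
 *	u64 wb_gpu_addr;
 *
 *	if (amdgpu_device_wb_get(adev, &offs))
 *		return -ENOMEM;
 *	adev->wb.wb[offs] = 0;				// CPU view of the slot
 *	wb_gpu_addr = adev->wb.gpu_addr + (offs * 4);	// GPU view of the slot
 *	...
 *	amdgpu_device_wb_free(adev, offs);
 */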
584 
585 /*
586  * Benchmarking
587  */
588 int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
589 
590 /*
591  * ASIC specific register table accessible by UMD
592  */
593 struct amdgpu_allowed_register_entry {
594 	uint32_t reg_offset;
595 	bool grbm_indexed;
596 };
597 
598 /**
599  * enum amd_reset_method - Methods for resetting AMD GPU devices
600  *
601  * @AMD_RESET_METHOD_NONE: The device will not be reset.
602  * @AMD_RESET_LEGACY: Method reserved for SI, CIK and VI ASICs.
603  * @AMD_RESET_MODE0: Reset the entire ASIC. Not currently available for
604  *                   any device.
605  * @AMD_RESET_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN, etc.)
606  *                   individually. Suitable only for some discrete GPUs, not
607  *                   available for all ASICs.
608  * @AMD_RESET_MODE2: Resets a lesser level of IPs compared to MODE1. Which IPs
609  *                   are reset depends on the ASIC. Notably doesn't reset IPs
610  *                   shared with the CPU on APUs or the memory controllers (so
611  *                   VRAM is not lost). Not available on all ASICs.
612  * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs
613  * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
614  *                  but without powering off the PCI bus. Suitable only for
615  *                  discrete GPUs.
616  * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset
617  *                 and does a secondary bus reset or FLR, depending on what the
618  *                 underlying hardware supports.
619  *
620  * Methods available to the AMD GPU driver for resetting the device. Not all
621  * methods are suitable for every device. The user can override the method
622  * using the module parameter `reset_method`.
623  */
624 enum amd_reset_method {
625 	AMD_RESET_METHOD_NONE = -1,
626 	AMD_RESET_METHOD_LEGACY = 0,
627 	AMD_RESET_METHOD_MODE0,
628 	AMD_RESET_METHOD_MODE1,
629 	AMD_RESET_METHOD_MODE2,
630 	AMD_RESET_METHOD_LINK,
631 	AMD_RESET_METHOD_BACO,
632 	AMD_RESET_METHOD_PCI,
633 	AMD_RESET_METHOD_ON_INIT,
634 };
635 
636 struct amdgpu_video_codec_info {
637 	u32 codec_type;
638 	u32 max_width;
639 	u32 max_height;
640 	u32 max_pixels_per_frame;
641 	u32 max_level;
642 };
643 
644 #define codec_info_build(type, width, height, level) \
645 			 .codec_type = type,\
646 			 .max_width = width,\
647 			 .max_height = height,\
648 			 .max_pixels_per_frame = height * width,\
649 			 .max_level = level,
650 
651 struct amdgpu_video_codecs {
652 	const u32 codec_count;
653 	const struct amdgpu_video_codec_info *codec_array;
654 };
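/*
 * codec_info_build() is meant to be used as the body of a designated
 * initializer. A hedged, illustrative table (the entries and limits below
 * are made up and not taken from any real ASIC):
 *
 *	static const struct amdgpu_video_codec_info example_decode_array[] = {
 *		{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
 *		{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 *	};
 *
 *	static const struct amdgpu_video_codecs example_decode = {
 *		.codec_count = ARRAY_SIZE(example_decode_array),
 *		.codec_array = example_decode_array,
 *	};
 */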
655 
656 /*
657  * ASIC specific functions.
658  */
659 struct amdgpu_asic_funcs {
660 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
661 	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
662 				   u8 *bios, u32 length_bytes);
663 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
664 			     u32 sh_num, u32 reg_offset, u32 *value);
665 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
666 	int (*reset)(struct amdgpu_device *adev);
667 	enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
668 	/* get the reference clock */
669 	u32 (*get_xclk)(struct amdgpu_device *adev);
670 	/* MM block clocks */
671 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
672 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
673 	/* static power management */
674 	int (*get_pcie_lanes)(struct amdgpu_device *adev);
675 	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
676 	/* get config memsize register */
677 	u32 (*get_config_memsize)(struct amdgpu_device *adev);
678 	/* flush hdp write queue */
679 	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
680 	/* invalidate hdp read cache */
681 	void (*invalidate_hdp)(struct amdgpu_device *adev,
682 			       struct amdgpu_ring *ring);
683 	/* check if the asic needs a full reset or if soft reset will work */
684 	bool (*need_full_reset)(struct amdgpu_device *adev);
685 	/* initialize doorbell layout for a specific asic */
686 	void (*init_doorbell_index)(struct amdgpu_device *adev);
687 	/* PCIe bandwidth usage */
688 	void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
689 			       uint64_t *count1);
690 	/* do we need to reset the asic at init time (e.g., kexec) */
691 	bool (*need_reset_on_init)(struct amdgpu_device *adev);
692 	/* PCIe replay counter */
693 	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
694 	/* device supports BACO */
695 	int (*supports_baco)(struct amdgpu_device *adev);
696 	/* pre asic_init quirks */
697 	void (*pre_asic_init)(struct amdgpu_device *adev);
698 	/* enter/exit umd stable pstate */
699 	int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
700 	/* query video codecs */
701 	int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
702 				  const struct amdgpu_video_codecs **codecs);
703 	/* encode "> 32bits" smn addressing */
704 	u64 (*encode_ext_smn_addressing)(int ext_id);
705 
706 	ssize_t (*get_reg_state)(struct amdgpu_device *adev,
707 				 enum amdgpu_reg_state reg_state, void *buf,
708 				 size_t max_size);
709 };
710 
711 /*
712  * IOCTL.
713  */
714 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
715 				struct drm_file *filp);
716 
717 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
718 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
719 				    struct drm_file *filp);
720 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
721 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
722 				struct drm_file *filp);
723 
724 /* VRAM scratch page for HDP bug, default vram page */
725 struct amdgpu_mem_scratch {
726 	struct amdgpu_bo		*robj;
727 	volatile uint32_t		*ptr;
728 	u64				gpu_addr;
729 };
730 
731 /*
732  * CGS
733  */
734 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
735 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
736 
737 /*
738  * Core structure, functions and helpers.
739  */
740 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
741 typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
742 
743 typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t);
744 typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);
745 
746 typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
747 typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
748 
749 typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t);
750 typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t);
751 
752 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
753 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
754 
755 struct amdgpu_mmio_remap {
756 	u32 reg_offset;
757 	resource_size_t bus_addr;
758 };
759 
760 /* Define the HW IP blocks to be used in the driver, add more if necessary */
761 enum amd_hw_ip_block_type {
762 	GC_HWIP = 1,
763 	HDP_HWIP,
764 	SDMA0_HWIP,
765 	SDMA1_HWIP,
766 	SDMA2_HWIP,
767 	SDMA3_HWIP,
768 	SDMA4_HWIP,
769 	SDMA5_HWIP,
770 	SDMA6_HWIP,
771 	SDMA7_HWIP,
772 	LSDMA_HWIP,
773 	MMHUB_HWIP,
774 	ATHUB_HWIP,
775 	NBIO_HWIP,
776 	MP0_HWIP,
777 	MP1_HWIP,
778 	UVD_HWIP,
779 	VCN_HWIP = UVD_HWIP,
780 	JPEG_HWIP = VCN_HWIP,
781 	VCN1_HWIP,
782 	VCE_HWIP,
783 	VPE_HWIP,
784 	DF_HWIP,
785 	DCE_HWIP,
786 	OSSSYS_HWIP,
787 	SMUIO_HWIP,
788 	PWR_HWIP,
789 	NBIF_HWIP,
790 	THM_HWIP,
791 	CLK_HWIP,
792 	UMC_HWIP,
793 	RSMU_HWIP,
794 	XGMI_HWIP,
795 	DCI_HWIP,
796 	PCIE_HWIP,
797 	ISP_HWIP,
798 	MAX_HWIP
799 };
800 
801 #define HWIP_MAX_INSTANCE	44
802 
803 #define HW_ID_MAX		300
804 #define IP_VERSION_FULL(mj, mn, rv, var, srev) \
805 	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
806 #define IP_VERSION(mj, mn, rv)		IP_VERSION_FULL(mj, mn, rv, 0, 0)
807 #define IP_VERSION_MAJ(ver)		((ver) >> 24)
808 #define IP_VERSION_MIN(ver)		(((ver) >> 16) & 0xFF)
809 #define IP_VERSION_REV(ver)		(((ver) >> 8) & 0xFF)
810 #define IP_VERSION_VARIANT(ver)		(((ver) >> 4) & 0xF)
811 #define IP_VERSION_SUBREV(ver)		((ver) & 0xF)
812 #define IP_VERSION_MAJ_MIN_REV(ver)	((ver) >> 8)
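/*
 * Worked example of the packing above (values are illustrative): GC 9.4.3
 * is stored as IP_VERSION(9, 4, 3) == 0x09040300, so IP_VERSION_MAJ() is 9,
 * IP_VERSION_MIN() is 4, IP_VERSION_REV() is 3, variant and sub-revision
 * are 0, and IP_VERSION_MAJ_MIN_REV() yields 0x090403.
 */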
813 
814 struct amdgpu_ip_map_info {
815 	/* Map of logical to actual dev instances/mask */
816 	uint32_t 		dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
817 	int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
818 				      enum amd_hw_ip_block_type block,
819 				      int8_t inst);
820 	uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
821 					enum amd_hw_ip_block_type block,
822 					uint32_t mask);
823 };
824 
825 struct amd_powerplay {
826 	void *pp_handle;
827 	const struct amd_pm_funcs *pp_funcs;
828 };
829 
830 struct ip_discovery_top;
831 
832 /* polaris10 kickers */
833 #define ASICID_IS_P20(did, rid)		(((did == 0x67DF) && \
834 					 ((rid == 0xE3) || \
835 					  (rid == 0xE4) || \
836 					  (rid == 0xE5) || \
837 					  (rid == 0xE7) || \
838 					  (rid == 0xEF))) || \
839 					 ((did == 0x6FDF) && \
840 					 ((rid == 0xE7) || \
841 					  (rid == 0xEF) || \
842 					  (rid == 0xFF))))
843 
844 #define ASICID_IS_P30(did, rid)		((did == 0x67DF) && \
845 					((rid == 0xE1) || \
846 					 (rid == 0xF7)))
847 
848 /* polaris11 kickers */
849 #define ASICID_IS_P21(did, rid)		(((did == 0x67EF) && \
850 					 ((rid == 0xE0) || \
851 					  (rid == 0xE5))) || \
852 					 ((did == 0x67FF) && \
853 					 ((rid == 0xCF) || \
854 					  (rid == 0xEF) || \
855 					  (rid == 0xFF))))
856 
857 #define ASICID_IS_P31(did, rid)		((did == 0x67EF) && \
858 					((rid == 0xE2)))
859 
860 /* polaris12 kickers */
861 #define ASICID_IS_P23(did, rid)		(((did == 0x6987) && \
862 					 ((rid == 0xC0) || \
863 					  (rid == 0xC1) || \
864 					  (rid == 0xC3) || \
865 					  (rid == 0xC7))) || \
866 					 ((did == 0x6981) && \
867 					 ((rid == 0x00) || \
868 					  (rid == 0x01) || \
869 					  (rid == 0x10))))
870 
871 struct amdgpu_mqd_prop {
872 	uint64_t mqd_gpu_addr;
873 	uint64_t hqd_base_gpu_addr;
874 	uint64_t rptr_gpu_addr;
875 	uint64_t wptr_gpu_addr;
876 	uint32_t queue_size;
877 	bool use_doorbell;
878 	uint32_t doorbell_index;
879 	uint64_t eop_gpu_addr;
880 	uint32_t hqd_pipe_priority;
881 	uint32_t hqd_queue_priority;
882 	bool allow_tunneling;
883 	bool hqd_active;
884 	uint64_t shadow_addr;
885 	uint64_t gds_bkup_addr;
886 	uint64_t csa_addr;
887 	uint64_t fence_address;
888 	bool tmz_queue;
889 };
890 
891 struct amdgpu_mqd {
892 	unsigned mqd_size;
893 	int (*init_mqd)(struct amdgpu_device *adev, void *mqd,
894 			struct amdgpu_mqd_prop *p);
895 };
896 
897 struct amdgpu_pcie_reset_ctx {
898 	bool in_link_reset;
899 	bool occurs_dpc;
900 	bool audio_suspended;
901 };
902 
903 /*
904  * Custom Init levels could be defined for different situations where a full
905  * initialization of all hardware blocks is not expected. Sample cases are
906  * custom init sequences after resume from S0i3/S3, reset on initialization,
907  * partial reset of blocks etc. Presently, this defines only two levels. Levels
908  * are described in corresponding struct definitions - amdgpu_init_default,
909  * amdgpu_init_minimal_xgmi.
910  */
911 enum amdgpu_init_lvl_id {
912 	AMDGPU_INIT_LEVEL_DEFAULT,
913 	AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
914 	AMDGPU_INIT_LEVEL_RESET_RECOVERY,
915 };
916 
917 struct amdgpu_init_level {
918 	enum amdgpu_init_lvl_id level;
919 	uint32_t hwini_ip_block_mask;
920 };
921 
922 #define AMDGPU_RESET_MAGIC_NUM 64
923 #define AMDGPU_MAX_DF_PERFMONS 4
924 struct amdgpu_reset_domain;
925 struct amdgpu_fru_info;
926 
927 enum amdgpu_enforce_isolation_mode {
928 	AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
929 	AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
930 	AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
931 	AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
932 };
933 
934 
935 /*
936  * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
937  */
938 #define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
939 
940 struct amdgpu_device {
941 	struct device			*dev;
942 	struct pci_dev			*pdev;
943 	struct drm_device		ddev;
944 
945 #ifdef CONFIG_DRM_AMD_ACP
946 	struct amdgpu_acp		acp;
947 #endif
948 	struct amdgpu_hive_info *hive;
949 	struct amdgpu_xcp_mgr *xcp_mgr;
950 	/* ASIC */
951 	enum amd_asic_type		asic_type;
952 	uint32_t			family;
953 	uint32_t			rev_id;
954 	uint32_t			external_rev_id;
955 	unsigned long			flags;
956 	unsigned long			apu_flags;
957 	int				usec_timeout;
958 	const struct amdgpu_asic_funcs	*asic_funcs;
959 	bool				shutdown;
960 	bool				need_swiotlb;
961 	bool				accel_working;
962 	struct notifier_block		acpi_nb;
963 	struct notifier_block		pm_nb;
964 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
965 	struct debugfs_blob_wrapper     debugfs_vbios_blob;
966 	struct debugfs_blob_wrapper     debugfs_discovery_blob;
967 	struct mutex			srbm_mutex;
968 	/* GRBM index mutex. Protects concurrent access to GRBM index */
969 	struct mutex                    grbm_idx_mutex;
970 	struct dev_pm_domain		vga_pm_domain;
971 	bool				have_disp_power_ref;
972 	bool                            have_atomics_support;
973 
974 	/* BIOS */
975 	bool				is_atom_fw;
976 	uint8_t				*bios;
977 	uint32_t			bios_size;
978 	uint32_t			bios_scratch_reg_offset;
979 	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
980 
981 	/* Register/doorbell mmio */
982 	resource_size_t			rmmio_base;
983 	resource_size_t			rmmio_size;
984 	void __iomem			*rmmio;
985 	/* protects concurrent MM_INDEX/DATA based register access */
986 	spinlock_t mmio_idx_lock;
987 	struct amdgpu_mmio_remap        rmmio_remap;
988 	/* protects concurrent SMC based register access */
989 	spinlock_t smc_idx_lock;
990 	amdgpu_rreg_t			smc_rreg;
991 	amdgpu_wreg_t			smc_wreg;
992 	/* protects concurrent PCIE register access */
993 	spinlock_t pcie_idx_lock;
994 	amdgpu_rreg_t			pcie_rreg;
995 	amdgpu_wreg_t			pcie_wreg;
996 	amdgpu_rreg_t			pciep_rreg;
997 	amdgpu_wreg_t			pciep_wreg;
998 	amdgpu_rreg_ext_t		pcie_rreg_ext;
999 	amdgpu_wreg_ext_t		pcie_wreg_ext;
1000 	amdgpu_rreg64_t			pcie_rreg64;
1001 	amdgpu_wreg64_t			pcie_wreg64;
1002 	amdgpu_rreg64_ext_t			pcie_rreg64_ext;
1003 	amdgpu_wreg64_ext_t			pcie_wreg64_ext;
1004 	/* protects concurrent UVD register access */
1005 	spinlock_t uvd_ctx_idx_lock;
1006 	amdgpu_rreg_t			uvd_ctx_rreg;
1007 	amdgpu_wreg_t			uvd_ctx_wreg;
1008 	/* protects concurrent DIDT register access */
1009 	spinlock_t didt_idx_lock;
1010 	amdgpu_rreg_t			didt_rreg;
1011 	amdgpu_wreg_t			didt_wreg;
1012 	/* protects concurrent gc_cac register access */
1013 	spinlock_t gc_cac_idx_lock;
1014 	amdgpu_rreg_t			gc_cac_rreg;
1015 	amdgpu_wreg_t			gc_cac_wreg;
1016 	/* protects concurrent se_cac register access */
1017 	spinlock_t se_cac_idx_lock;
1018 	amdgpu_rreg_t			se_cac_rreg;
1019 	amdgpu_wreg_t			se_cac_wreg;
1020 	/* protects concurrent ENDPOINT (audio) register access */
1021 	spinlock_t audio_endpt_idx_lock;
1022 	amdgpu_block_rreg_t		audio_endpt_rreg;
1023 	amdgpu_block_wreg_t		audio_endpt_wreg;
1024 	struct amdgpu_doorbell		doorbell;
1025 
1026 	/* clock/pll info */
1027 	struct amdgpu_clock            clock;
1028 
1029 	/* MC */
1030 	struct amdgpu_gmc		gmc;
1031 	struct amdgpu_gart		gart;
1032 	dma_addr_t			dummy_page_addr;
1033 	struct amdgpu_vm_manager	vm_manager;
1034 	struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
1035 	DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);
1036 
1037 	/* memory management */
1038 	struct amdgpu_mman		mman;
1039 	struct amdgpu_mem_scratch	mem_scratch;
1040 	struct amdgpu_wb		wb;
1041 	atomic64_t			num_bytes_moved;
1042 	atomic64_t			num_evictions;
1043 	atomic64_t			num_vram_cpu_page_faults;
1044 	atomic_t			gpu_reset_counter;
1045 	atomic_t			vram_lost_counter;
1046 
1047 	/* data for buffer migration throttling */
1048 	struct {
1049 		spinlock_t		lock;
1050 		s64			last_update_us;
1051 		s64			accum_us; /* accumulated microseconds */
1052 		s64			accum_us_vis; /* for visible VRAM */
1053 		u32			log2_max_MBps;
1054 	} mm_stats;
1055 
1056 	/* display */
1057 	bool				enable_virtual_display;
1058 	struct amdgpu_vkms_output       *amdgpu_vkms_output;
1059 	struct amdgpu_mode_info		mode_info;
1060 	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
1061 	struct delayed_work         hotplug_work;
1062 	struct amdgpu_irq_src		crtc_irq;
1063 	struct amdgpu_irq_src		vline0_irq;
1064 	struct amdgpu_irq_src		vupdate_irq;
1065 	struct amdgpu_irq_src		pageflip_irq;
1066 	struct amdgpu_irq_src		hpd_irq;
1067 	struct amdgpu_irq_src		dmub_trace_irq;
1068 	struct amdgpu_irq_src		dmub_outbox_irq;
1069 
1070 	/* rings */
1071 	u64				fence_context;
1072 	unsigned			num_rings;
1073 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
1074 	struct dma_fence __rcu		*gang_submit;
1075 	bool				ib_pool_ready;
1076 	struct amdgpu_sa_manager	ib_pools[AMDGPU_IB_POOL_MAX];
1077 	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
1078 
1079 	/* interrupts */
1080 	struct amdgpu_irq		irq;
1081 
1082 	/* powerplay */
1083 	struct amd_powerplay		powerplay;
1084 	struct amdgpu_pm		pm;
1085 	u64				cg_flags;
1086 	u32				pg_flags;
1087 
1088 	/* nbio */
1089 	struct amdgpu_nbio		nbio;
1090 
1091 	/* hdp */
1092 	struct amdgpu_hdp		hdp;
1093 
1094 	/* smuio */
1095 	struct amdgpu_smuio		smuio;
1096 
1097 	/* mmhub */
1098 	struct amdgpu_mmhub		mmhub;
1099 
1100 	/* gfxhub */
1101 	struct amdgpu_gfxhub		gfxhub;
1102 
1103 	/* gfx */
1104 	struct amdgpu_gfx		gfx;
1105 
1106 	/* sdma */
1107 	struct amdgpu_sdma		sdma;
1108 
1109 	/* lsdma */
1110 	struct amdgpu_lsdma		lsdma;
1111 
1112 	/* uvd */
1113 	struct amdgpu_uvd		uvd;
1114 
1115 	/* vce */
1116 	struct amdgpu_vce		vce;
1117 
1118 	/* vcn */
1119 	struct amdgpu_vcn		vcn;
1120 
1121 	/* jpeg */
1122 	struct amdgpu_jpeg		jpeg;
1123 
1124 	/* vpe */
1125 	struct amdgpu_vpe		vpe;
1126 
1127 	/* umsch */
1128 	struct amdgpu_umsch_mm		umsch_mm;
1129 	bool				enable_umsch_mm;
1130 
1131 	/* firmwares */
1132 	struct amdgpu_firmware		firmware;
1133 
1134 	/* PSP */
1135 	struct psp_context		psp;
1136 
1137 	/* GDS */
1138 	struct amdgpu_gds		gds;
1139 
1140 	/* for userq and VM fences */
1141 	struct amdgpu_seq64		seq64;
1142 
1143 	/* KFD */
1144 	struct amdgpu_kfd_dev		kfd;
1145 
1146 	/* UMC */
1147 	struct amdgpu_umc		umc;
1148 
1149 	/* display related functionality */
1150 	struct amdgpu_display_manager dm;
1151 
1152 #if defined(CONFIG_DRM_AMD_ISP)
1153 	/* isp */
1154 	struct amdgpu_isp		isp;
1155 #endif
1156 
1157 	/* mes */
1158 	bool                            enable_mes;
1159 	bool                            enable_mes_kiq;
1160 	bool                            enable_uni_mes;
1161 	struct amdgpu_mes               mes;
1162 	struct amdgpu_mqd               mqds[AMDGPU_HW_IP_NUM];
1163 	const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
1164 
1165 	/* xarray used to retrieve the user queue fence driver reference
1166 	 * in the EOP interrupt handler to signal the particular user
1167 	 * queue fence.
1168 	 */
1169 	struct xarray			userq_xa;
1170 
1171 	/* df */
1172 	struct amdgpu_df                df;
1173 
1174 	/* MCA */
1175 	struct amdgpu_mca               mca;
1176 
1177 	/* ACA */
1178 	struct amdgpu_aca		aca;
1179 
1180 	/* CPER */
1181 	struct amdgpu_cper		cper;
1182 
1183 	struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
1184 	uint32_t		        harvest_ip_mask;
1185 	int				num_ip_blocks;
1186 	struct mutex	mn_lock;
1187 	DECLARE_HASHTABLE(mn_hash, 7);
1188 
1189 	/* tracking pinned memory */
1190 	atomic64_t vram_pin_size;
1191 	atomic64_t visible_pin_size;
1192 	atomic64_t gart_pin_size;
1193 
1194 	/* soc15 register offset based on ip, instance and segment */
1195 	uint32_t		*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
1196 	struct amdgpu_ip_map_info	ip_map;
1197 
1198 	/* delayed work_func for deferring clockgating during resume */
1199 	struct delayed_work     delayed_init_work;
1200 
1201 	struct amdgpu_virt	virt;
1202 
1203 	/* record hw reset is performed */
1204 	bool has_hw_reset;
1205 	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];
1206 
1207 	/* s3/s4 mask */
1208 	bool                            in_suspend;
1209 	bool				in_s3;
1210 	bool				in_s4;
1211 	bool				in_s0ix;
1212 	suspend_state_t			last_suspend_state;
1213 
1214 	enum pp_mp1_state               mp1_state;
1215 	struct amdgpu_doorbell_index doorbell_index;
1216 
1217 	struct mutex			notifier_lock;
1218 
1219 	int asic_reset_res;
1220 	struct work_struct		xgmi_reset_work;
1221 	struct list_head		reset_list;
1222 
1223 	long				gfx_timeout;
1224 	long				sdma_timeout;
1225 	long				video_timeout;
1226 	long				compute_timeout;
1227 	long				psp_timeout;
1228 
1229 	uint64_t			unique_id;
1230 	uint64_t	df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
1231 
1232 	/* enable runtime pm on the device */
1233 	bool                            in_runpm;
1234 	bool                            has_pr3;
1235 
1236 	bool                            ucode_sysfs_en;
1237 
1238 	struct amdgpu_fru_info		*fru_info;
1239 	atomic_t			throttling_logging_enabled;
1240 	struct ratelimit_state		throttling_logging_rs;
1241 	uint32_t                        ras_hw_enabled;
1242 	uint32_t                        ras_enabled;
1243 	bool                            ras_default_ecc_enabled;
1244 
1245 	bool                            no_hw_access;
1246 	struct pci_saved_state          *pci_state;
1247 	pci_channel_state_t		pci_channel_state;
1248 
1249 	struct amdgpu_pcie_reset_ctx	pcie_reset_ctx;
1250 
1251 	/* Track auto wait count on s_barrier settings */
1252 	bool				barrier_has_auto_waitcnt;
1253 
1254 	struct amdgpu_reset_control     *reset_cntl;
1255 	uint32_t                        ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
1256 
1257 	bool				ram_is_direct_mapped;
1258 
1259 	struct list_head                ras_list;
1260 
1261 	struct ip_discovery_top         *ip_top;
1262 
1263 	struct amdgpu_reset_domain	*reset_domain;
1264 
1265 	struct mutex			benchmark_mutex;
1266 
1267 	bool                            scpm_enabled;
1268 	uint32_t                        scpm_status;
1269 
1270 	struct work_struct		reset_work;
1271 
1272 	bool                            dc_enabled;
1273 	/* Mask of active clusters */
1274 	uint32_t			aid_mask;
1275 
1276 	/* Debug */
1277 	bool                            debug_vm;
1278 	bool                            debug_largebar;
1279 	bool                            debug_disable_soft_recovery;
1280 	bool                            debug_use_vram_fw_buf;
1281 	bool                            debug_enable_ras_aca;
1282 	bool                            debug_exp_resets;
1283 	bool                            debug_disable_gpu_ring_reset;
1284 	bool                            debug_vm_userptr;
1285 
1286 	/* Protection for the following isolation structure */
1287 	struct mutex                    enforce_isolation_mutex;
1288 	enum amdgpu_enforce_isolation_mode	enforce_isolation[MAX_XCP];
1289 	struct amdgpu_isolation {
1290 		void			*owner;
1291 		struct dma_fence	*spearhead;
1292 		struct amdgpu_sync	active;
1293 		struct amdgpu_sync	prev;
1294 	} isolation[MAX_XCP];
1295 
1296 	struct amdgpu_init_level *init_lvl;
1297 
1298 	/* This flag is used to determine how VRAM allocations are handled for APUs
1299 	 * in KFD: VRAM or GTT.
1300 	 */
1301 	bool                            apu_prefer_gtt;
1302 
1303 	struct list_head		userq_mgr_list;
1304 	struct mutex                    userq_mutex;
1305 	bool                            userq_halt_for_enforce_isolation;
1306 };
1307 
1308 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
1309 					 uint8_t ip, uint8_t inst)
1310 {
1311 	/* This considers only major/minor/rev and ignores
1312 	 * subrevision/variant fields.
1313 	 */
1314 	return adev->ip_versions[ip][inst] & ~0xFFU;
1315 }
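/*
 * Illustrative use of the helper above: IP code typically compares the
 * stripped version against a packed IP_VERSION() value, e.g.
 *
 *	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
 *		use_new_path(adev);	// use_new_path() is a made-up name
 */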
1316 
1317 static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
1318 					      uint8_t ip, uint8_t inst)
1319 {
1320 	/* This returns full version - major/minor/rev/variant/subrevision */
1321 	return adev->ip_versions[ip][inst];
1322 }
1323 
1324 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
1325 {
1326 	return container_of(ddev, struct amdgpu_device, ddev);
1327 }
1328 
1329 static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
1330 {
1331 	return &adev->ddev;
1332 }
1333 
1334 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
1335 {
1336 	return container_of(bdev, struct amdgpu_device, mman.bdev);
1337 }
1338 
1339 int amdgpu_device_init(struct amdgpu_device *adev,
1340 		       uint32_t flags);
1341 void amdgpu_device_fini_hw(struct amdgpu_device *adev);
1342 void amdgpu_device_fini_sw(struct amdgpu_device *adev);
1343 
1344 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
1345 
1346 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
1347 			     void *buf, size_t size, bool write);
1348 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
1349 				 void *buf, size_t size, bool write);
1350 
1351 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
1352 			       void *buf, size_t size, bool write);
1353 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
1354 			    uint32_t inst, uint32_t reg_addr, char reg_name[],
1355 			    uint32_t expected_value, uint32_t mask);
1356 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
1357 			    uint32_t reg, uint32_t acc_flags);
1358 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
1359 				    u64 reg_addr);
1360 uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
1361 				uint32_t reg, uint32_t acc_flags,
1362 				uint32_t xcc_id);
1363 void amdgpu_device_wreg(struct amdgpu_device *adev,
1364 			uint32_t reg, uint32_t v,
1365 			uint32_t acc_flags);
1366 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
1367 				     u64 reg_addr, u32 reg_data);
1368 void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
1369 			    uint32_t reg, uint32_t v,
1370 			    uint32_t acc_flags,
1371 			    uint32_t xcc_id);
1372 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
1373 			     uint32_t reg, uint32_t v, uint32_t xcc_id);
1374 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
1375 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
1376 
1377 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
1378 				u32 reg_addr);
1379 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
1380 				  u32 reg_addr);
1381 u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
1382 				  u64 reg_addr);
1383 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
1384 				 u32 reg_addr, u32 reg_data);
1385 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
1386 				   u32 reg_addr, u64 reg_data);
1387 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1388 				   u64 reg_addr, u64 reg_data);
1389 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
1390 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
1391 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
1392 
1393 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
1394 
1395 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
1396 				 struct amdgpu_reset_context *reset_context);
1397 
1398 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
1399 			 struct amdgpu_reset_context *reset_context);
1400 
1401 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);
1402 
1403 int emu_soc_asic_init(struct amdgpu_device *adev);
1404 
1405 /*
1406  * Registers read & write functions.
1407  */
1408 #define AMDGPU_REGS_NO_KIQ    (1<<1)
1409 #define AMDGPU_REGS_RLC	(1<<2)
1410 
1411 #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
1412 #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
1413 
1414 #define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
1415 #define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)
1416 
1417 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
1418 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
1419 
1420 #define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
1421 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
1422 #define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
1423 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1424 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1425 #define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
1426 #define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
1427 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
1428 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
1429 #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
1430 #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
1431 #define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
1432 #define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
1433 #define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
1434 #define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
1435 #define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
1436 #define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
1437 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
1438 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
1439 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
1440 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
1441 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
1442 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
1443 #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
1444 #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
1445 #define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
1446 #define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
1447 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
1448 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
1449 #define WREG32_P(reg, val, mask)				\
1450 	do {							\
1451 		uint32_t tmp_ = RREG32(reg);			\
1452 		tmp_ &= (mask);					\
1453 		tmp_ |= ((val) & ~(mask));			\
1454 		WREG32(reg, tmp_);				\
1455 	} while (0)
1456 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1457 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
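/*
 * Note on the read-modify-write helpers above (example values only): in
 * WREG32_P() the "mask" selects the bits to preserve, so with a current
 * register value of 0xAABB, WREG32_P(reg, 0x0011, 0xFF00) writes 0xAA11.
 * WREG32_AND() and WREG32_OR() are the usual AND/OR shorthands built on it.
 */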
1458 #define WREG32_PLL_P(reg, val, mask)				\
1459 	do {							\
1460 		uint32_t tmp_ = RREG32_PLL(reg);		\
1461 		tmp_ &= (mask);					\
1462 		tmp_ |= ((val) & ~(mask));			\
1463 		WREG32_PLL(reg, tmp_);				\
1464 	} while (0)
1465 
1466 #define WREG32_SMC_P(_Reg, _Val, _Mask)                         \
1467 	do {                                                    \
1468 		u32 tmp = RREG32_SMC(_Reg);                     \
1469 		tmp &= (_Mask);                                 \
1470 		tmp |= ((_Val) & ~(_Mask));                     \
1471 		WREG32_SMC(_Reg, tmp);                          \
1472 	} while (0)
1473 
1474 #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
1475 
1476 #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
1477 #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
1478 
1479 #define REG_SET_FIELD(orig_val, reg, field, field_val)			\
1480 	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
1481 	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
1482 
1483 #define REG_GET_FIELD(value, reg, field)				\
1484 	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
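/*
 * Sketch of the field helpers above, assuming the register headers provide
 * FOO__BAR__SHIFT and FOO__BAR_MASK (FOO and BAR are hypothetical names):
 *
 *	tmp = RREG32(mmFOO);
 *	tmp = REG_SET_FIELD(tmp, FOO, BAR, 1);		// set FOO.BAR to 1
 *	WREG32(mmFOO, tmp);
 *	bar = REG_GET_FIELD(RREG32(mmFOO), FOO, BAR);	// read FOO.BAR back
 */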
1485 
1486 #define WREG32_FIELD(reg, field, val)	\
1487 	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1488 
1489 #define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
1490 	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1491 
1492 #define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
1493 /*
1494  * BIOS helpers.
1495  */
1496 #define RBIOS8(i) (adev->bios[i])
1497 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
1498 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
1499 
1500 /*
1501  * ASICs macro.
1502  */
1503 #define amdgpu_asic_set_vga_state(adev, state) \
1504     ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
1505 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
1506 #define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
1507 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
1508 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
1509 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
1510 #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
1511 #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
1512 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
1513 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
1514 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
1515 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
1516 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
1517 #define amdgpu_asic_flush_hdp(adev, r) \
1518 	((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
1519 #define amdgpu_asic_invalidate_hdp(adev, r) \
1520 	((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
1521 	 ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
1522 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
1523 #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
1524 #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
1525 #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
1526 #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
1527 #define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
1528 #define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
1529 #define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
1530 	((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
1531 #define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))
1532 
1533 #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter))
1534 
1535 #define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
1536 #define for_each_inst(i, inst_mask)        \
1537 	for (i = ffs(inst_mask); i-- != 0; \
1538 	     i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
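/*
 * Illustrative walk of the iterator above: since ffs() is 1-based, the i--
 * in the loop converts it to a 0-based instance index, so for a local
 * inst_mask of 0b1011 the body runs with i == 0, 1 and 3, e.g.
 *
 *	for_each_inst(i, inst_mask)
 *		setup_instance(adev, i);	// setup_instance() is a placeholder
 */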
1539 
1540 /* Common functions */
1541 bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
1542 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
1543 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
1544 			      struct amdgpu_job *job,
1545 			      struct amdgpu_reset_context *reset_context);
1546 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
1547 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
1548 bool amdgpu_device_need_post(struct amdgpu_device *adev);
1549 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
1550 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
1551 
1552 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
1553 				  u64 num_vis_bytes);
1554 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
1555 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1556 					     const u32 *registers,
1557 					     const u32 array_size);
1558 
1559 int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
1560 int amdgpu_device_link_reset(struct amdgpu_device *adev);
1561 bool amdgpu_device_supports_atpx(struct drm_device *dev);
1562 bool amdgpu_device_supports_px(struct drm_device *dev);
1563 bool amdgpu_device_supports_boco(struct drm_device *dev);
1564 bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
1565 int amdgpu_device_supports_baco(struct drm_device *dev);
1566 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
1567 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
1568 				      struct amdgpu_device *peer_adev);
1569 int amdgpu_device_baco_enter(struct drm_device *dev);
1570 int amdgpu_device_baco_exit(struct drm_device *dev);
1571 
1572 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
1573 		struct amdgpu_ring *ring);
1574 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
1575 		struct amdgpu_ring *ring);
1576 
1577 void amdgpu_device_halt(struct amdgpu_device *adev);
1578 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
1579 				u32 reg);
1580 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
1581 				u32 reg, u32 v);
1582 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
1583 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
1584 					    struct dma_fence *gang);
1585 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
1586 						  struct amdgpu_ring *ring,
1587 						  struct amdgpu_job *job);
1588 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
1589 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
1590 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
1591 
1592 /* atpx handler */
1593 #if defined(CONFIG_VGA_SWITCHEROO)
1594 void amdgpu_register_atpx_handler(void);
1595 void amdgpu_unregister_atpx_handler(void);
1596 bool amdgpu_has_atpx_dgpu_power_cntl(void);
1597 bool amdgpu_is_atpx_hybrid(void);
1598 bool amdgpu_has_atpx(void);
1599 #else
1600 static inline void amdgpu_register_atpx_handler(void) {}
1601 static inline void amdgpu_unregister_atpx_handler(void) {}
1602 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
1603 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
1604 static inline bool amdgpu_has_atpx(void) { return false; }
1605 #endif
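/*
 * Illustrative sketch (assumption about call sites): the ATPX handler
 * is registered once at module load and queried afterwards, e.g.:
 *
 *	amdgpu_register_atpx_handler();
 *	if (amdgpu_has_atpx() && amdgpu_is_atpx_hybrid())
 *		handle_hybrid_graphics();	// hypothetical helper
 *
 * With CONFIG_VGA_SWITCHEROO disabled, the inline stubs above keep
 * such callers compiling while reporting no ATPX support.
 */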
1606 
1607 /*
1608  * KMS
1609  */
1610 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
1611 extern const int amdgpu_max_kms_ioctl;
1612 
1613 int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
1614 void amdgpu_driver_unload_kms(struct drm_device *dev);
1615 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
1616 void amdgpu_driver_postclose_kms(struct drm_device *dev,
1617 				 struct drm_file *file_priv);
1618 void amdgpu_driver_release_kms(struct drm_device *dev);
1619 
1620 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
1621 int amdgpu_device_prepare(struct drm_device *dev);
1622 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
1623 int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
1624 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
1625 int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
1626 void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
1627 int amdgpu_info_ioctl(struct drm_device *dev, void *data,
1628 		      struct drm_file *filp);
1629 
1630 /*
1631  * functions used by amdgpu_encoder.c
1632  */
1633 struct amdgpu_afmt_acr {
1634 	u32 clock;
1635 
1636 	int n_32khz;
1637 	int cts_32khz;
1638 
1639 	int n_44_1khz;
1640 	int cts_44_1khz;
1641 
1642 	int n_48khz;
1643 	int cts_48khz;
1644 
1645 };
1646 
1647 struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
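/*
 * Illustrative sketch: look up HDMI audio ACR (N/CTS) parameters for a
 * given clock; the field names are those of struct amdgpu_afmt_acr
 * above, the programming helper is hypothetical:
 *
 *	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
 *
 *	program_acr_48khz(acr.n_48khz, acr.cts_48khz);	// hypothetical helper
 */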
1648 
1649 /* amdgpu_acpi.c */
1650 
1651 struct amdgpu_numa_info {
1652 	uint64_t size;
1653 	int pxm;
1654 	int nid;
1655 };
1656 
1657 /* ATCS Device/Driver State */
1658 #define AMDGPU_ATCS_PSC_DEV_STATE_D0		0
1659 #define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT	3
1660 #define AMDGPU_ATCS_PSC_DRV_STATE_OPR		0
1661 #define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR	1
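/*
 * Illustrative sketch (assumption about the intended pairing): the
 * ATCS device/driver state defines above feed
 * amdgpu_acpi_power_shift_control(), declared below, e.g.:
 *
 *	amdgpu_acpi_power_shift_control(adev,
 *					AMDGPU_ATCS_PSC_DEV_STATE_D0,
 *					AMDGPU_ATCS_PSC_DRV_STATE_OPR);
 */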
1662 
1663 #if defined(CONFIG_ACPI)
1664 int amdgpu_acpi_init(struct amdgpu_device *adev);
1665 void amdgpu_acpi_fini(struct amdgpu_device *adev);
1666 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
1667 bool amdgpu_acpi_is_power_shift_control_supported(void);
1668 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
1669 						u8 perf_req, bool advertise);
1670 int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1671 				    u8 dev_state, bool drv_state);
1672 int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
1673 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
1674 int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
1675 			     u64 *tmr_size);
1676 int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
1677 			     struct amdgpu_numa_info *numa_info);
1678 
1679 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
1680 bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
1681 void amdgpu_acpi_detect(void);
1682 void amdgpu_acpi_release(void);
1683 #else
1684 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
1685 static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
1686 					   u64 *tmr_offset, u64 *tmr_size)
1687 {
1688 	return -EINVAL;
1689 }
1690 static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
1691 					   int xcc_id,
1692 					   struct amdgpu_numa_info *numa_info)
1693 {
1694 	return -EINVAL;
1695 }
1696 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
1697 static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
1698 static inline void amdgpu_acpi_detect(void) { }
1699 static inline void amdgpu_acpi_release(void) { }
1700 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
1701 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1702 						  u8 dev_state, bool drv_state) { return 0; }
1703 static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
1704 						 enum amdgpu_ss ss_state) { return 0; }
1705 static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
1706 #endif
1707 
1708 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
1709 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
1710 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
1711 #else
1712 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
1713 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
1714 #endif
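/*
 * Illustrative sketch (assumption about a typical suspend path): pick
 * the platform sleep flavour before powering down:
 *
 *	if (amdgpu_acpi_is_s0ix_active(adev))
 *		prepare_s0ix(adev);		// hypothetical helper
 *	else if (amdgpu_acpi_is_s3_active(adev))
 *		prepare_s3(adev);		// hypothetical helper
 */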
1715 
1716 #if defined(CONFIG_DRM_AMD_ISP)
1717 int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]);
1718 #endif
1719 
1720 void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
1721 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
1722 
1723 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
1724 					   pci_channel_state_t state);
1725 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
1726 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
1727 void amdgpu_pci_resume(struct pci_dev *pdev);
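/*
 * Illustrative sketch (assumption: these callbacks back a
 * struct pci_error_handlers registered with the PCI driver), e.g.:
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */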
1728 
1729 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
1730 bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
1731 
1732 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);
1733 
1734 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1735 			       enum amd_clockgating_state state);
1736 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
1737 			       enum amd_powergating_state state);
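/*
 * Illustrative sketch (assuming the AMD_CG_STATE_ and AMD_PG_STATE_
 * enumerators from amd_shared.h): gate clocks and power domains
 * together, e.g. on the suspend path:
 *
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
 *	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
 */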
1738 
1739 static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
1740 {
1741 	return amdgpu_gpu_recovery != 0 &&
1742 		adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
1743 		adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
1744 		adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
1745 		adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
1746 }
1747 
1748 #include "amdgpu_object.h"
1749 
1750 static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
1751 {
1752 	return adev->gmc.tmz_enabled;
1753 }
1754 
1755 int amdgpu_in_reset(struct amdgpu_device *adev);
1756 
1757 extern const struct attribute_group amdgpu_vram_mgr_attr_group;
1758 extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
1759 extern const struct attribute_group amdgpu_flash_attr_group;
1760 
1761 void amdgpu_set_init_level(struct amdgpu_device *adev,
1762 			   enum amdgpu_init_lvl_id lvl);
1763 #endif
1764