xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu.h (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #ifndef __AMDGPU_H__
29 #define __AMDGPU_H__
30 
31 #ifdef pr_fmt
32 #undef pr_fmt
33 #endif
34 
35 #define pr_fmt(fmt) "amdgpu: " fmt
36 
37 #ifdef dev_fmt
38 #undef dev_fmt
39 #endif
40 
41 #define dev_fmt(fmt) "amdgpu: " fmt
42 
43 #include "amdgpu_ctx.h"
44 
45 #include <linux/atomic.h>
46 #include <linux/wait.h>
47 #include <linux/list.h>
48 #include <linux/kref.h>
49 #include <linux/rbtree.h>
50 #include <linux/hashtable.h>
51 #include <linux/dma-fence.h>
52 #include <linux/pci.h>
53 
54 #include <drm/ttm/ttm_bo.h>
55 #include <drm/ttm/ttm_placement.h>
56 
57 #include <drm/amdgpu_drm.h>
58 #include <drm/drm_gem.h>
59 #include <drm/drm_ioctl.h>
60 
61 #include <kgd_kfd_interface.h>
62 #include "dm_pp_interface.h"
63 #include "kgd_pp_interface.h"
64 
65 #include "amd_shared.h"
66 #include "amdgpu_mode.h"
67 #include "amdgpu_ih.h"
68 #include "amdgpu_irq.h"
69 #include "amdgpu_ucode.h"
70 #include "amdgpu_ttm.h"
71 #include "amdgpu_psp.h"
72 #include "amdgpu_gds.h"
73 #include "amdgpu_sync.h"
74 #include "amdgpu_ring.h"
75 #include "amdgpu_vm.h"
76 #include "amdgpu_dpm.h"
77 #include "amdgpu_acp.h"
78 #include "amdgpu_uvd.h"
79 #include "amdgpu_vce.h"
80 #include "amdgpu_vcn.h"
81 #include "amdgpu_jpeg.h"
82 #include "amdgpu_vpe.h"
83 #include "amdgpu_umsch_mm.h"
84 #include "amdgpu_gmc.h"
85 #include "amdgpu_gfx.h"
86 #include "amdgpu_sdma.h"
87 #include "amdgpu_lsdma.h"
88 #include "amdgpu_nbio.h"
89 #include "amdgpu_hdp.h"
90 #include "amdgpu_dm.h"
91 #include "amdgpu_virt.h"
92 #include "amdgpu_csa.h"
93 #include "amdgpu_mes_ctx.h"
94 #include "amdgpu_gart.h"
95 #include "amdgpu_debugfs.h"
96 #include "amdgpu_job.h"
97 #include "amdgpu_bo_list.h"
98 #include "amdgpu_gem.h"
99 #include "amdgpu_doorbell.h"
100 #include "amdgpu_amdkfd.h"
101 #include "amdgpu_discovery.h"
102 #include "amdgpu_mes.h"
103 #include "amdgpu_umc.h"
104 #include "amdgpu_mmhub.h"
105 #include "amdgpu_gfxhub.h"
106 #include "amdgpu_df.h"
107 #include "amdgpu_smuio.h"
108 #include "amdgpu_fdinfo.h"
109 #include "amdgpu_mca.h"
110 #include "amdgpu_aca.h"
111 #include "amdgpu_ras.h"
112 #include "amdgpu_cper.h"
113 #include "amdgpu_xcp.h"
114 #include "amdgpu_seq64.h"
115 #include "amdgpu_reg_state.h"
116 #include "amdgpu_userq.h"
117 #include "amdgpu_eviction_fence.h"
118 #if defined(CONFIG_DRM_AMD_ISP)
119 #include "amdgpu_isp.h"
120 #endif
121 
122 #define MAX_GPU_INSTANCE		64
123 
124 #define GFX_SLICE_PERIOD_MS		250
125 
126 struct amdgpu_gpu_instance {
127 	struct amdgpu_device		*adev;
128 	int				mgpu_fan_enabled;
129 };
130 
131 struct amdgpu_mgpu_info {
132 	struct amdgpu_gpu_instance	gpu_ins[MAX_GPU_INSTANCE];
133 	struct mutex			mutex;
134 	uint32_t			num_gpu;
135 	uint32_t			num_dgpu;
136 	uint32_t			num_apu;
137 };
138 
139 enum amdgpu_ss {
140 	AMDGPU_SS_DRV_LOAD,
141 	AMDGPU_SS_DEV_D0,
142 	AMDGPU_SS_DEV_D3,
143 	AMDGPU_SS_DRV_UNLOAD
144 };
145 
146 struct amdgpu_hwip_reg_entry {
147 	u32		hwip;
148 	u32		inst;
149 	u32		seg;
150 	u32		reg_offset;
151 	const char	*reg_name;
152 };
153 
154 struct amdgpu_watchdog_timer {
155 	bool timeout_fatal_disable;
156 	uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
157 };
158 
159 #define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH	256
160 
161 /*
162  * Module parameters.
163  */
164 extern int amdgpu_modeset;
165 extern unsigned int amdgpu_vram_limit;
166 extern int amdgpu_vis_vram_limit;
167 extern int amdgpu_gart_size;
168 extern int amdgpu_gtt_size;
169 extern int amdgpu_moverate;
170 extern int amdgpu_audio;
171 extern int amdgpu_disp_priority;
172 extern int amdgpu_hw_i2c;
173 extern int amdgpu_pcie_gen2;
174 extern int amdgpu_msi;
175 extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
176 extern int amdgpu_dpm;
177 extern int amdgpu_fw_load_type;
178 extern int amdgpu_aspm;
179 extern int amdgpu_runtime_pm;
180 extern uint amdgpu_ip_block_mask;
181 extern int amdgpu_bapm;
182 extern int amdgpu_deep_color;
183 extern int amdgpu_vm_size;
184 extern int amdgpu_vm_block_size;
185 extern int amdgpu_vm_fragment_size;
186 extern int amdgpu_vm_fault_stop;
187 extern int amdgpu_vm_debug;
188 extern int amdgpu_vm_update_mode;
189 extern int amdgpu_exp_hw_support;
190 extern int amdgpu_dc;
191 extern int amdgpu_sched_jobs;
192 extern int amdgpu_sched_hw_submission;
193 extern uint amdgpu_pcie_gen_cap;
194 extern uint amdgpu_pcie_lane_cap;
195 extern u64 amdgpu_cg_mask;
196 extern uint amdgpu_pg_mask;
197 extern uint amdgpu_sdma_phase_quantum;
198 extern char *amdgpu_disable_cu;
199 extern char *amdgpu_virtual_display;
200 extern uint amdgpu_pp_feature_mask;
201 extern uint amdgpu_force_long_training;
202 extern int amdgpu_lbpw;
203 extern int amdgpu_compute_multipipe;
204 extern int amdgpu_gpu_recovery;
205 extern int amdgpu_emu_mode;
206 extern uint amdgpu_smu_memory_pool_size;
207 extern int amdgpu_smu_pptable_id;
208 extern uint amdgpu_dc_feature_mask;
209 extern uint amdgpu_freesync_vid_mode;
210 extern uint amdgpu_dc_debug_mask;
211 extern uint amdgpu_dc_visual_confirm;
212 extern int amdgpu_dm_abm_level;
213 extern int amdgpu_backlight;
214 extern int amdgpu_damage_clips;
215 extern struct amdgpu_mgpu_info mgpu_info;
216 extern int amdgpu_ras_enable;
217 extern uint amdgpu_ras_mask;
218 extern int amdgpu_bad_page_threshold;
219 extern bool amdgpu_ignore_bad_page_threshold;
220 extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
221 extern int amdgpu_async_gfx_ring;
222 extern int amdgpu_mcbp;
223 extern int amdgpu_discovery;
224 extern int amdgpu_mes;
225 extern int amdgpu_mes_log_enable;
226 extern int amdgpu_mes_kiq;
227 extern int amdgpu_uni_mes;
228 extern int amdgpu_noretry;
229 extern int amdgpu_force_asic_type;
230 extern int amdgpu_smartshift_bias;
231 extern int amdgpu_use_xgmi_p2p;
232 extern int amdgpu_mtype_local;
233 extern int amdgpu_enforce_isolation;
234 #ifdef CONFIG_HSA_AMD
235 extern int sched_policy;
236 extern bool debug_evictions;
237 extern bool no_system_mem_limit;
238 extern int halt_if_hws_hang;
239 extern uint amdgpu_svm_default_granularity;
240 #else
241 static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
242 static const bool __maybe_unused debug_evictions; /* = false */
243 static const bool __maybe_unused no_system_mem_limit;
244 static const int __maybe_unused halt_if_hws_hang;
245 #endif
246 #ifdef CONFIG_HSA_AMD_P2P
247 extern bool pcie_p2p;
248 #endif
249 
250 extern int amdgpu_tmz;
251 extern int amdgpu_reset_method;
252 
253 #ifdef CONFIG_DRM_AMDGPU_SI
254 extern int amdgpu_si_support;
255 #endif
256 #ifdef CONFIG_DRM_AMDGPU_CIK
257 extern int amdgpu_cik_support;
258 #endif
259 extern int amdgpu_num_kcq;
260 
261 #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
262 #define AMDGPU_UMSCHFW_LOG_SIZE (32 * 1024)
263 extern int amdgpu_vcnfw_log;
264 extern int amdgpu_sg_display;
265 extern int amdgpu_umsch_mm;
266 extern int amdgpu_seamless;
267 extern int amdgpu_umsch_mm_fwlog;
268 
269 extern int amdgpu_user_partt_mode;
270 extern int amdgpu_agp;
271 extern int amdgpu_rebar;
272 
273 extern int amdgpu_wbrf;
274 extern int amdgpu_user_queue;
275 
276 #define AMDGPU_VM_MAX_NUM_CTX			4096
277 #define AMDGPU_SG_THRESHOLD			(256*1024*1024)
278 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
279 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
280 #define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
281 #define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
282 #define AMDGPUFB_CONN_LIMIT			4
283 #define AMDGPU_BIOS_NUM_SCRATCH			16
284 
285 #define AMDGPU_VBIOS_VGA_ALLOCATION		(9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */
286 
287 /* hard reset data */
288 #define AMDGPU_ASIC_RESET_DATA                  0x39d5e86b
289 
290 /* reset flags */
291 #define AMDGPU_RESET_GFX			(1 << 0)
292 #define AMDGPU_RESET_COMPUTE			(1 << 1)
293 #define AMDGPU_RESET_DMA			(1 << 2)
294 #define AMDGPU_RESET_CP				(1 << 3)
295 #define AMDGPU_RESET_GRBM			(1 << 4)
296 #define AMDGPU_RESET_DMA1			(1 << 5)
297 #define AMDGPU_RESET_RLC			(1 << 6)
298 #define AMDGPU_RESET_SEM			(1 << 7)
299 #define AMDGPU_RESET_IH				(1 << 8)
300 #define AMDGPU_RESET_VMC			(1 << 9)
301 #define AMDGPU_RESET_MC				(1 << 10)
302 #define AMDGPU_RESET_DISPLAY			(1 << 11)
303 #define AMDGPU_RESET_UVD			(1 << 12)
304 #define AMDGPU_RESET_VCE			(1 << 13)
305 #define AMDGPU_RESET_VCE1			(1 << 14)
306 
307 /* reset mask */
308 #define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
309 #define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */
310 #define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */
311 #define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */
312 
313 /* max cursor sizes (in pixels) */
314 #define CIK_CURSOR_WIDTH 128
315 #define CIK_CURSOR_HEIGHT 128
316 
317 /* smart shift bias level limits */
318 #define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
319 #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
320 
321 /* Extra time delay (in ms) to eliminate the influence of momentary temperature fluctuation */
322 #define AMDGPU_SWCTF_EXTRA_DELAY		50
323 
324 struct amdgpu_xcp_mgr;
325 struct amdgpu_device;
326 struct amdgpu_irq_src;
327 struct amdgpu_fpriv;
328 struct amdgpu_bo_va_mapping;
329 struct kfd_vm_fault_info;
330 struct amdgpu_hive_info;
331 struct amdgpu_reset_context;
332 struct amdgpu_reset_control;
333 
334 enum amdgpu_cp_irq {
335 	AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
336 	AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
337 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
338 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
339 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
340 	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
341 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
342 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
343 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
344 	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
345 
346 	AMDGPU_CP_IRQ_LAST
347 };
348 
349 enum amdgpu_thermal_irq {
350 	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
351 	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
352 
353 	AMDGPU_THERMAL_IRQ_LAST
354 };
355 
356 enum amdgpu_kiq_irq {
357 	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
358 	AMDGPU_CP_KIQ_IRQ_LAST
359 };
360 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
361 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
362 #define MAX_KIQ_REG_TRY 1000
363 
364 int amdgpu_device_ip_set_clockgating_state(void *dev,
365 					   enum amd_ip_block_type block_type,
366 					   enum amd_clockgating_state state);
367 int amdgpu_device_ip_set_powergating_state(void *dev,
368 					   enum amd_ip_block_type block_type,
369 					   enum amd_powergating_state state);
370 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
371 					    u64 *flags);
372 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
373 				   enum amd_ip_block_type block_type);
374 bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
375 			      enum amd_ip_block_type block_type);
376 int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
377 
378 int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
379 
380 #define AMDGPU_MAX_IP_NUM 16
381 
382 struct amdgpu_ip_block_status {
383 	bool valid;
384 	bool sw;
385 	bool hw;
386 	bool late_initialized;
387 	bool hang;
388 };
389 
390 struct amdgpu_ip_block_version {
391 	const enum amd_ip_block_type type;
392 	const u32 major;
393 	const u32 minor;
394 	const u32 rev;
395 	const struct amd_ip_funcs *funcs;
396 };
397 
398 struct amdgpu_ip_block {
399 	struct amdgpu_ip_block_status status;
400 	const struct amdgpu_ip_block_version *version;
401 	struct amdgpu_device *adev;
402 };
403 
404 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
405 				       enum amd_ip_block_type type,
406 				       u32 major, u32 minor);
407 
408 struct amdgpu_ip_block *
409 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
410 			      enum amd_ip_block_type type);
411 
412 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
413 			       const struct amdgpu_ip_block_version *ip_block_version);
414 
415 /*
416  * BIOS.
417  */
418 bool amdgpu_get_bios(struct amdgpu_device *adev);
419 bool amdgpu_read_bios(struct amdgpu_device *adev);
420 bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
421 				     u8 *bios, u32 length_bytes);
422 void amdgpu_bios_release(struct amdgpu_device *adev);
423 /*
424  * Clocks
425  */
426 
427 #define AMDGPU_MAX_PPLL 3
428 
429 struct amdgpu_clock {
430 	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
431 	struct amdgpu_pll spll;
432 	struct amdgpu_pll mpll;
433 	/* 10 kHz units */
434 	uint32_t default_mclk;
435 	uint32_t default_sclk;
436 	uint32_t default_dispclk;
437 	uint32_t current_dispclk;
438 	uint32_t dp_extclk;
439 	uint32_t max_pixel_clock;
440 };
441 
442 /* Sub-allocation manager; it has to be protected by another lock.
443  * By design this is a helper for other parts of the driver,
444  * like the indirect buffer or semaphore, which both have their own
445  * locking.
446  *
447  * The principle is simple: we keep a list of sub-allocations in offset
448  * order (the first entry has offset == 0, the last entry has the highest
449  * offset).
450  *
451  * When allocating a new object we first check whether there is room at
452  * the end, i.e. total_size - (last_object_offset + last_object_size) >=
453  * alloc_size. If so, we allocate the new object there.
454  *
455  * When there is not enough room at the end, we start waiting on
456  * each sub-object until we reach object_offset + object_size >=
457  * alloc_size; this object then becomes the sub-object we return.
458  *
459  * Alignment can't be bigger than the page size.
460  *
461  * Holes are not considered for allocation to keep things simple.
462  * The assumption is that there won't be holes (all objects use the same
463  * alignment).
464  */
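
/* Worked example of the "room at the end" check above (numbers made up for
 * illustration): with total_size = 1024 KiB, last_object_offset = 768 KiB and
 * last_object_size = 128 KiB, the free tail is 1024 - (768 + 128) = 128 KiB,
 * so an alloc_size of up to 128 KiB is placed at offset 896 KiB; anything
 * larger has to wait for earlier sub-objects to be freed first.
 */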
465 
466 struct amdgpu_sa_manager {
467 	struct drm_suballoc_manager	base;
468 	struct amdgpu_bo		*bo;
469 	uint64_t			gpu_addr;
470 	void				*cpu_ptr;
471 };
472 
473 /*
474  * IRQS.
475  */
476 
477 struct amdgpu_flip_work {
478 	struct delayed_work		flip_work;
479 	struct work_struct		unpin_work;
480 	struct amdgpu_device		*adev;
481 	int				crtc_id;
482 	u32				target_vblank;
483 	uint64_t			base;
484 	struct drm_pending_vblank_event *event;
485 	struct amdgpu_bo		*old_abo;
486 	unsigned			shared_count;
487 	struct dma_fence		**shared;
488 	struct dma_fence_cb		cb;
489 	bool				async;
490 };
491 
492 /*
493  * file private structure
494  */
495 
496 struct amdgpu_fpriv {
497 	struct amdgpu_vm	vm;
498 	struct amdgpu_bo_va	*prt_va;
499 	struct amdgpu_bo_va	*csa_va;
500 	struct amdgpu_bo_va	*seq64_va;
501 	struct mutex		bo_list_lock;
502 	struct idr		bo_list_handles;
503 	struct amdgpu_ctx_mgr	ctx_mgr;
504 	struct amdgpu_userq_mgr	userq_mgr;
505 
506 	/* Eviction fence infra */
507 	struct amdgpu_eviction_fence_mgr evf_mgr;
508 
509 	/** GPU partition selection */
510 	uint32_t		xcp_id;
511 };
512 
513 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
514 
515 /*
516  * Writeback
517  */
518 #define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */
519 
520 /**
521  * amdgpu_wb - This struct is used for small GPU memory allocation.
522  *
523  * This struct is used to allocate a small amount of GPU memory that can be
524  * used to shadow certain states into the memory. This is especially useful for
525  * providing easy CPU access to some states without requiring register access
526  * (e.g., if some block is power gated, reading a register may be problematic).
527  *
528  * Note: the term writeback was initially used because many of the amdgpu
529  * components had some level of writeback memory, and this struct initially
530  * described those components.
531  */
532 struct amdgpu_wb {
533 
534 	/**
535 	 * @wb_obj:
536 	 *
537 	 * Buffer Object used for the writeback memory.
538 	 */
539 	struct amdgpu_bo	*wb_obj;
540 
541 	/**
542 	 * @wb:
543 	 *
544 	 * Pointer to the first writeback slot. In terms of CPU address
545 	 * this value can be accessed directly by using the offset as an index.
546 	 * For the GPU address, it is necessary to use gpu_addr and the offset.
547 	 */
548 	volatile uint32_t	*wb;
549 
550 	/**
551 	 * @gpu_addr:
552 	 *
553 	 * Writeback base address in the GPU.
554 	 */
555 	uint64_t		gpu_addr;
556 
557 	/**
558 	 * @num_wb:
559 	 *
560 	 * Number of writeback slots reserved for amdgpu.
561 	 */
562 	u32			num_wb;
563 
564 	/**
565 	 * @used:
566 	 *
567 	 * Track the writeback slots already in use.
568 	 */
569 	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
570 
571 	/**
572 	 * @lock:
573 	 *
574 	 * Protects read and write of the used field array.
575 	 */
576 	spinlock_t		lock;
577 };
578 
579 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
580 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
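
/* Usage sketch (illustration only; the local variable names below are
 * assumptions, and the index is treated as a dword offset into the writeback
 * page as the ring code does): a slot is obtained with amdgpu_device_wb_get(),
 * read by the CPU through adev->wb.wb[], written by the GPU through an address
 * derived from adev->wb.gpu_addr, and released with amdgpu_device_wb_free().
 *
 *	u32 slot;
 *	u64 slot_gpu_addr;
 *	int r;
 *
 *	r = amdgpu_device_wb_get(adev, &slot);
 *	if (r)
 *		return r;				// no free slot
 *	slot_gpu_addr = adev->wb.gpu_addr + slot * 4;	// dword index -> GPU address
 *	adev->wb.wb[slot] = 0;				// CPU-side init of the slot
 *	// ... program the engine to write completion data to slot_gpu_addr ...
 *	amdgpu_device_wb_free(adev, slot);
 */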
581 
582 /*
583  * Benchmarking
584  */
585 int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
586 
587 /*
588  * ASIC specific register table accessible by UMD
589  */
590 struct amdgpu_allowed_register_entry {
591 	uint32_t reg_offset;
592 	bool grbm_indexed;
593 };
594 
595 /**
596  * enum amd_reset_method - Methods for resetting AMD GPU devices
597  *
598  * @AMD_RESET_METHOD_NONE: The device will not be reset.
599  * @AMD_RESET_LEGACY: Method reserved for SI, CIK and VI ASICs.
600  * @AMD_RESET_MODE0: Reset the entire ASIC. Not currently available for
601  *                   any device.
602  * @AMD_RESET_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN, etc.)
603  *                   individually. Suitable only for some discrete GPUs, not
604  *                   available for all ASICs.
605  * @AMD_RESET_MODE2: Resets a lesser level of IPs compared to MODE1. Which IPs
606  *                   are reset depends on the ASIC. Notably doesn't reset IPs
607  *                   shared with the CPU on APUs or the memory controllers (so
608  *                   VRAM is not lost). Not available on all ASICs.
609  * @AMD_RESET_LINK: Triggers an SW-UP link reset on other GPUs.
610  * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
611  *                  but without powering off the PCI bus. Suitable only for
612  *                  discrete GPUs.
613  * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset
614  *                 and does a secondary bus reset or FLR, depending on what the
615  *                 underlying hardware supports.
616  *
617  * Methods available to the AMD GPU driver for resetting the device. Not all
618  * methods are suitable for every device. Users can override the method using
619  * the module parameter `reset_method`.
620  */
621 enum amd_reset_method {
622 	AMD_RESET_METHOD_NONE = -1,
623 	AMD_RESET_METHOD_LEGACY = 0,
624 	AMD_RESET_METHOD_MODE0,
625 	AMD_RESET_METHOD_MODE1,
626 	AMD_RESET_METHOD_MODE2,
627 	AMD_RESET_METHOD_LINK,
628 	AMD_RESET_METHOD_BACO,
629 	AMD_RESET_METHOD_PCI,
630 	AMD_RESET_METHOD_ON_INIT,
631 };
632 
633 struct amdgpu_video_codec_info {
634 	u32 codec_type;
635 	u32 max_width;
636 	u32 max_height;
637 	u32 max_pixels_per_frame;
638 	u32 max_level;
639 };
640 
641 #define codec_info_build(type, width, height, level) \
642 			 .codec_type = type,\
643 			 .max_width = width,\
644 			 .max_height = height,\
645 			 .max_pixels_per_frame = height * width,\
646 			 .max_level = level,
647 
648 struct amdgpu_video_codecs {
649 	const u32 codec_count;
650 	const struct amdgpu_video_codec_info *codec_array;
651 };
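
/* Sketch of the intended initializer pattern (the codec index and the limit
 * values below are placeholders, not data for any particular ASIC):
 *
 *	static const struct amdgpu_video_codec_info example_decode_info[] = {
 *		{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
 *				  8192, 4352, 186)},
 *	};
 *
 *	static const struct amdgpu_video_codecs example_decode_codecs = {
 *		.codec_count = ARRAY_SIZE(example_decode_info),
 *		.codec_array = example_decode_info,
 *	};
 */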
652 
653 /*
654  * ASIC specific functions.
655  */
656 struct amdgpu_asic_funcs {
657 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
658 	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
659 				   u8 *bios, u32 length_bytes);
660 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
661 			     u32 sh_num, u32 reg_offset, u32 *value);
662 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
663 	int (*reset)(struct amdgpu_device *adev);
664 	enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
665 	/* get the reference clock */
666 	u32 (*get_xclk)(struct amdgpu_device *adev);
667 	/* MM block clocks */
668 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
669 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
670 	/* static power management */
671 	int (*get_pcie_lanes)(struct amdgpu_device *adev);
672 	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
673 	/* get config memsize register */
674 	u32 (*get_config_memsize)(struct amdgpu_device *adev);
675 	/* flush hdp write queue */
676 	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
677 	/* invalidate hdp read cache */
678 	void (*invalidate_hdp)(struct amdgpu_device *adev,
679 			       struct amdgpu_ring *ring);
680 	/* check if the asic needs a full reset or if soft reset will work */
681 	bool (*need_full_reset)(struct amdgpu_device *adev);
682 	/* initialize doorbell layout for a specific asic */
683 	void (*init_doorbell_index)(struct amdgpu_device *adev);
684 	/* PCIe bandwidth usage */
685 	void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
686 			       uint64_t *count1);
687 	/* do we need to reset the asic at init time (e.g., kexec) */
688 	bool (*need_reset_on_init)(struct amdgpu_device *adev);
689 	/* PCIe replay counter */
690 	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
691 	/* device supports BACO */
692 	int (*supports_baco)(struct amdgpu_device *adev);
693 	/* pre asic_init quirks */
694 	void (*pre_asic_init)(struct amdgpu_device *adev);
695 	/* enter/exit umd stable pstate */
696 	int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
697 	/* query video codecs */
698 	int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
699 				  const struct amdgpu_video_codecs **codecs);
700 	/* encode "> 32bits" smn addressing */
701 	u64 (*encode_ext_smn_addressing)(int ext_id);
702 
703 	ssize_t (*get_reg_state)(struct amdgpu_device *adev,
704 				 enum amdgpu_reg_state reg_state, void *buf,
705 				 size_t max_size);
706 };
707 
708 /*
709  * IOCTL.
710  */
711 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
712 				struct drm_file *filp);
713 
714 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
715 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
716 				    struct drm_file *filp);
717 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
718 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
719 				struct drm_file *filp);
720 
721 /* VRAM scratch page for HDP bug, default vram page */
722 struct amdgpu_mem_scratch {
723 	struct amdgpu_bo		*robj;
724 	volatile uint32_t		*ptr;
725 	u64				gpu_addr;
726 };
727 
728 /*
729  * CGS
730  */
731 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
732 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
733 
734 /*
735  * Core structure, functions and helpers.
736  */
737 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
738 typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
739 
740 typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t);
741 typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);
742 
743 typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
744 typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
745 
746 typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t);
747 typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t);
748 
749 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
750 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
751 
752 struct amdgpu_mmio_remap {
753 	u32 reg_offset;
754 	resource_size_t bus_addr;
755 };
756 
757 /* Define the HW IP blocks that will be used in the driver; add more if necessary */
758 enum amd_hw_ip_block_type {
759 	GC_HWIP = 1,
760 	HDP_HWIP,
761 	SDMA0_HWIP,
762 	SDMA1_HWIP,
763 	SDMA2_HWIP,
764 	SDMA3_HWIP,
765 	SDMA4_HWIP,
766 	SDMA5_HWIP,
767 	SDMA6_HWIP,
768 	SDMA7_HWIP,
769 	LSDMA_HWIP,
770 	MMHUB_HWIP,
771 	ATHUB_HWIP,
772 	NBIO_HWIP,
773 	MP0_HWIP,
774 	MP1_HWIP,
775 	UVD_HWIP,
776 	VCN_HWIP = UVD_HWIP,
777 	JPEG_HWIP = VCN_HWIP,
778 	VCN1_HWIP,
779 	VCE_HWIP,
780 	VPE_HWIP,
781 	DF_HWIP,
782 	DCE_HWIP,
783 	OSSSYS_HWIP,
784 	SMUIO_HWIP,
785 	PWR_HWIP,
786 	NBIF_HWIP,
787 	THM_HWIP,
788 	CLK_HWIP,
789 	UMC_HWIP,
790 	RSMU_HWIP,
791 	XGMI_HWIP,
792 	DCI_HWIP,
793 	PCIE_HWIP,
794 	ISP_HWIP,
795 	MAX_HWIP
796 };
797 
798 #define HWIP_MAX_INSTANCE	44
799 
800 #define HW_ID_MAX		300
801 #define IP_VERSION_FULL(mj, mn, rv, var, srev) \
802 	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
803 #define IP_VERSION(mj, mn, rv)		IP_VERSION_FULL(mj, mn, rv, 0, 0)
804 #define IP_VERSION_MAJ(ver)		((ver) >> 24)
805 #define IP_VERSION_MIN(ver)		(((ver) >> 16) & 0xFF)
806 #define IP_VERSION_REV(ver)		(((ver) >> 8) & 0xFF)
807 #define IP_VERSION_VARIANT(ver)		(((ver) >> 4) & 0xF)
808 #define IP_VERSION_SUBREV(ver)		((ver) & 0xF)
809 #define IP_VERSION_MAJ_MIN_REV(ver)	((ver) >> 8)
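
/* Worked example (illustration only): IP_VERSION(11, 0, 3) encodes to
 * (11 << 24) | (0 << 16) | (3 << 8) = 0x0B000300; IP_VERSION_MAJ() then
 * recovers 11, IP_VERSION_MIN() 0 and IP_VERSION_REV() 3, while
 * IP_VERSION_MAJ_MIN_REV() discards the variant/sub-revision nibbles so that
 * two full versions differing only in those fields compare as equal.
 */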
810 
811 struct amdgpu_ip_map_info {
812 	/* Map of logical to actual dev instances/mask */
813 	uint32_t 		dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
814 	int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
815 				      enum amd_hw_ip_block_type block,
816 				      int8_t inst);
817 	uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
818 					enum amd_hw_ip_block_type block,
819 					uint32_t mask);
820 };
821 
822 struct amd_powerplay {
823 	void *pp_handle;
824 	const struct amd_pm_funcs *pp_funcs;
825 };
826 
827 struct ip_discovery_top;
828 
829 /* polaris10 kickers */
830 #define ASICID_IS_P20(did, rid)		(((did == 0x67DF) && \
831 					 ((rid == 0xE3) || \
832 					  (rid == 0xE4) || \
833 					  (rid == 0xE5) || \
834 					  (rid == 0xE7) || \
835 					  (rid == 0xEF))) || \
836 					 ((did == 0x6FDF) && \
837 					 ((rid == 0xE7) || \
838 					  (rid == 0xEF) || \
839 					  (rid == 0xFF))))
840 
841 #define ASICID_IS_P30(did, rid)		((did == 0x67DF) && \
842 					((rid == 0xE1) || \
843 					 (rid == 0xF7)))
844 
845 /* polaris11 kickers */
846 #define ASICID_IS_P21(did, rid)		(((did == 0x67EF) && \
847 					 ((rid == 0xE0) || \
848 					  (rid == 0xE5))) || \
849 					 ((did == 0x67FF) && \
850 					 ((rid == 0xCF) || \
851 					  (rid == 0xEF) || \
852 					  (rid == 0xFF))))
853 
854 #define ASICID_IS_P31(did, rid)		((did == 0x67EF) && \
855 					((rid == 0xE2)))
856 
857 /* polaris12 kickers */
858 #define ASICID_IS_P23(did, rid)		(((did == 0x6987) && \
859 					 ((rid == 0xC0) || \
860 					  (rid == 0xC1) || \
861 					  (rid == 0xC3) || \
862 					  (rid == 0xC7))) || \
863 					 ((did == 0x6981) && \
864 					 ((rid == 0x00) || \
865 					  (rid == 0x01) || \
866 					  (rid == 0x10))))
867 
868 struct amdgpu_mqd_prop {
869 	uint64_t mqd_gpu_addr;
870 	uint64_t hqd_base_gpu_addr;
871 	uint64_t rptr_gpu_addr;
872 	uint64_t wptr_gpu_addr;
873 	uint32_t queue_size;
874 	bool use_doorbell;
875 	uint32_t doorbell_index;
876 	uint64_t eop_gpu_addr;
877 	uint32_t hqd_pipe_priority;
878 	uint32_t hqd_queue_priority;
879 	bool allow_tunneling;
880 	bool hqd_active;
881 	uint64_t shadow_addr;
882 	uint64_t gds_bkup_addr;
883 	uint64_t csa_addr;
884 	uint64_t fence_address;
885 	bool tmz_queue;
886 };
887 
888 struct amdgpu_mqd {
889 	unsigned mqd_size;
890 	int (*init_mqd)(struct amdgpu_device *adev, void *mqd,
891 			struct amdgpu_mqd_prop *p);
892 };
893 
894 struct amdgpu_pcie_reset_ctx {
895 	bool in_link_reset;
896 	bool occurs_dpc;
897 	bool audio_suspended;
898 };
899 
900 /*
901  * Custom init levels could be defined for different situations where a full
902  * initialization of all hardware blocks is not expected. Sample cases are
903  * custom init sequences after resume from S0i3/S3, reset on initialization,
904  * partial reset of blocks, etc. Presently only a few levels are defined; they
905  * are described in the corresponding struct definitions - amdgpu_init_default,
906  * amdgpu_init_minimal_xgmi.
907  */
908 enum amdgpu_init_lvl_id {
909 	AMDGPU_INIT_LEVEL_DEFAULT,
910 	AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
911 	AMDGPU_INIT_LEVEL_RESET_RECOVERY,
912 };
913 
914 struct amdgpu_init_level {
915 	enum amdgpu_init_lvl_id level;
916 	uint32_t hwini_ip_block_mask;
917 };
918 
919 #define AMDGPU_RESET_MAGIC_NUM 64
920 #define AMDGPU_MAX_DF_PERFMONS 4
921 struct amdgpu_reset_domain;
922 struct amdgpu_fru_info;
923 
924 enum amdgpu_enforce_isolation_mode {
925 	AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
926 	AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
927 	AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
928 	AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
929 };
930 
931 
932 /*
933  * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
934  */
935 #define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
936 
937 struct amdgpu_device {
938 	struct device			*dev;
939 	struct pci_dev			*pdev;
940 	struct drm_device		ddev;
941 
942 #ifdef CONFIG_DRM_AMD_ACP
943 	struct amdgpu_acp		acp;
944 #endif
945 	struct amdgpu_hive_info *hive;
946 	struct amdgpu_xcp_mgr *xcp_mgr;
947 	/* ASIC */
948 	enum amd_asic_type		asic_type;
949 	uint32_t			family;
950 	uint32_t			rev_id;
951 	uint32_t			external_rev_id;
952 	unsigned long			flags;
953 	unsigned long			apu_flags;
954 	int				usec_timeout;
955 	const struct amdgpu_asic_funcs	*asic_funcs;
956 	bool				shutdown;
957 	bool				need_swiotlb;
958 	bool				accel_working;
959 	struct notifier_block		acpi_nb;
960 	struct notifier_block		pm_nb;
961 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
962 	struct debugfs_blob_wrapper     debugfs_vbios_blob;
963 	struct debugfs_blob_wrapper     debugfs_discovery_blob;
964 	struct mutex			srbm_mutex;
965 	/* GRBM index mutex. Protects concurrent access to GRBM index */
966 	struct mutex                    grbm_idx_mutex;
967 	struct dev_pm_domain		vga_pm_domain;
968 	bool				have_disp_power_ref;
969 	bool                            have_atomics_support;
970 
971 	/* BIOS */
972 	bool				is_atom_fw;
973 	uint8_t				*bios;
974 	uint32_t			bios_size;
975 	uint32_t			bios_scratch_reg_offset;
976 	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
977 
978 	/* Register/doorbell mmio */
979 	resource_size_t			rmmio_base;
980 	resource_size_t			rmmio_size;
981 	void __iomem			*rmmio;
982 	/* protects concurrent MM_INDEX/DATA based register access */
983 	spinlock_t mmio_idx_lock;
984 	struct amdgpu_mmio_remap        rmmio_remap;
985 	/* protects concurrent SMC based register access */
986 	spinlock_t smc_idx_lock;
987 	amdgpu_rreg_t			smc_rreg;
988 	amdgpu_wreg_t			smc_wreg;
989 	/* protects concurrent PCIE register access */
990 	spinlock_t pcie_idx_lock;
991 	amdgpu_rreg_t			pcie_rreg;
992 	amdgpu_wreg_t			pcie_wreg;
993 	amdgpu_rreg_t			pciep_rreg;
994 	amdgpu_wreg_t			pciep_wreg;
995 	amdgpu_rreg_ext_t		pcie_rreg_ext;
996 	amdgpu_wreg_ext_t		pcie_wreg_ext;
997 	amdgpu_rreg64_t			pcie_rreg64;
998 	amdgpu_wreg64_t			pcie_wreg64;
999 	amdgpu_rreg64_ext_t			pcie_rreg64_ext;
1000 	amdgpu_wreg64_ext_t			pcie_wreg64_ext;
1001 	/* protects concurrent UVD register access */
1002 	spinlock_t uvd_ctx_idx_lock;
1003 	amdgpu_rreg_t			uvd_ctx_rreg;
1004 	amdgpu_wreg_t			uvd_ctx_wreg;
1005 	/* protects concurrent DIDT register access */
1006 	spinlock_t didt_idx_lock;
1007 	amdgpu_rreg_t			didt_rreg;
1008 	amdgpu_wreg_t			didt_wreg;
1009 	/* protects concurrent gc_cac register access */
1010 	spinlock_t gc_cac_idx_lock;
1011 	amdgpu_rreg_t			gc_cac_rreg;
1012 	amdgpu_wreg_t			gc_cac_wreg;
1013 	/* protects concurrent se_cac register access */
1014 	spinlock_t se_cac_idx_lock;
1015 	amdgpu_rreg_t			se_cac_rreg;
1016 	amdgpu_wreg_t			se_cac_wreg;
1017 	/* protects concurrent ENDPOINT (audio) register access */
1018 	spinlock_t audio_endpt_idx_lock;
1019 	amdgpu_block_rreg_t		audio_endpt_rreg;
1020 	amdgpu_block_wreg_t		audio_endpt_wreg;
1021 	struct amdgpu_doorbell		doorbell;
1022 
1023 	/* clock/pll info */
1024 	struct amdgpu_clock            clock;
1025 
1026 	/* MC */
1027 	struct amdgpu_gmc		gmc;
1028 	struct amdgpu_gart		gart;
1029 	dma_addr_t			dummy_page_addr;
1030 	struct amdgpu_vm_manager	vm_manager;
1031 	struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
1032 	DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);
1033 
1034 	/* memory management */
1035 	struct amdgpu_mman		mman;
1036 	struct amdgpu_mem_scratch	mem_scratch;
1037 	struct amdgpu_wb		wb;
1038 	atomic64_t			num_bytes_moved;
1039 	atomic64_t			num_evictions;
1040 	atomic64_t			num_vram_cpu_page_faults;
1041 	atomic_t			gpu_reset_counter;
1042 	atomic_t			vram_lost_counter;
1043 
1044 	/* data for buffer migration throttling */
1045 	struct {
1046 		spinlock_t		lock;
1047 		s64			last_update_us;
1048 		s64			accum_us; /* accumulated microseconds */
1049 		s64			accum_us_vis; /* for visible VRAM */
1050 		u32			log2_max_MBps;
1051 	} mm_stats;
1052 
1053 	/* display */
1054 	bool				enable_virtual_display;
1055 	struct amdgpu_vkms_output       *amdgpu_vkms_output;
1056 	struct amdgpu_mode_info		mode_info;
1057 	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
1058 	struct delayed_work         hotplug_work;
1059 	struct amdgpu_irq_src		crtc_irq;
1060 	struct amdgpu_irq_src		vline0_irq;
1061 	struct amdgpu_irq_src		vupdate_irq;
1062 	struct amdgpu_irq_src		pageflip_irq;
1063 	struct amdgpu_irq_src		hpd_irq;
1064 	struct amdgpu_irq_src		dmub_trace_irq;
1065 	struct amdgpu_irq_src		dmub_outbox_irq;
1066 
1067 	/* rings */
1068 	u64				fence_context;
1069 	unsigned			num_rings;
1070 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
1071 	struct dma_fence __rcu		*gang_submit;
1072 	bool				ib_pool_ready;
1073 	struct amdgpu_sa_manager	ib_pools[AMDGPU_IB_POOL_MAX];
1074 	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
1075 
1076 	/* interrupts */
1077 	struct amdgpu_irq		irq;
1078 
1079 	/* powerplay */
1080 	struct amd_powerplay		powerplay;
1081 	struct amdgpu_pm		pm;
1082 	u64				cg_flags;
1083 	u32				pg_flags;
1084 
1085 	/* nbio */
1086 	struct amdgpu_nbio		nbio;
1087 
1088 	/* hdp */
1089 	struct amdgpu_hdp		hdp;
1090 
1091 	/* smuio */
1092 	struct amdgpu_smuio		smuio;
1093 
1094 	/* mmhub */
1095 	struct amdgpu_mmhub		mmhub;
1096 
1097 	/* gfxhub */
1098 	struct amdgpu_gfxhub		gfxhub;
1099 
1100 	/* gfx */
1101 	struct amdgpu_gfx		gfx;
1102 
1103 	/* sdma */
1104 	struct amdgpu_sdma		sdma;
1105 
1106 	/* lsdma */
1107 	struct amdgpu_lsdma		lsdma;
1108 
1109 	/* uvd */
1110 	struct amdgpu_uvd		uvd;
1111 
1112 	/* vce */
1113 	struct amdgpu_vce		vce;
1114 
1115 	/* vcn */
1116 	struct amdgpu_vcn		vcn;
1117 
1118 	/* jpeg */
1119 	struct amdgpu_jpeg		jpeg;
1120 
1121 	/* vpe */
1122 	struct amdgpu_vpe		vpe;
1123 
1124 	/* umsch */
1125 	struct amdgpu_umsch_mm		umsch_mm;
1126 	bool				enable_umsch_mm;
1127 
1128 	/* firmwares */
1129 	struct amdgpu_firmware		firmware;
1130 
1131 	/* PSP */
1132 	struct psp_context		psp;
1133 
1134 	/* GDS */
1135 	struct amdgpu_gds		gds;
1136 
1137 	/* for userq and VM fences */
1138 	struct amdgpu_seq64		seq64;
1139 
1140 	/* KFD */
1141 	struct amdgpu_kfd_dev		kfd;
1142 
1143 	/* UMC */
1144 	struct amdgpu_umc		umc;
1145 
1146 	/* display related functionality */
1147 	struct amdgpu_display_manager dm;
1148 
1149 #if defined(CONFIG_DRM_AMD_ISP)
1150 	/* isp */
1151 	struct amdgpu_isp		isp;
1152 #endif
1153 
1154 	/* mes */
1155 	bool                            enable_mes;
1156 	bool                            enable_mes_kiq;
1157 	bool                            enable_uni_mes;
1158 	struct amdgpu_mes               mes;
1159 	struct amdgpu_mqd               mqds[AMDGPU_HW_IP_NUM];
1160 	const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
1161 
1162 	/* xarray used to retrieve the user queue fence driver reference
1163 	 * in the EOP interrupt handler to signal the particular user
1164 	 * queue fence.
1165 	 */
1166 	struct xarray			userq_xa;
1167 
1168 	/* df */
1169 	struct amdgpu_df                df;
1170 
1171 	/* MCA */
1172 	struct amdgpu_mca               mca;
1173 
1174 	/* ACA */
1175 	struct amdgpu_aca		aca;
1176 
1177 	/* CPER */
1178 	struct amdgpu_cper		cper;
1179 
1180 	struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
1181 	uint32_t		        harvest_ip_mask;
1182 	int				num_ip_blocks;
1183 	struct mutex	mn_lock;
1184 	DECLARE_HASHTABLE(mn_hash, 7);
1185 
1186 	/* tracking pinned memory */
1187 	atomic64_t vram_pin_size;
1188 	atomic64_t visible_pin_size;
1189 	atomic64_t gart_pin_size;
1190 
1191 	/* soc15 register offset based on ip, instance and  segment */
1192 	uint32_t		*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
1193 	struct amdgpu_ip_map_info	ip_map;
1194 
1195 	/* delayed work_func for deferring clockgating during resume */
1196 	struct delayed_work     delayed_init_work;
1197 
1198 	struct amdgpu_virt	virt;
1199 
1200 	/* record hw reset is performed */
1201 	bool has_hw_reset;
1202 	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];
1203 
1204 	/* s3/s4 mask */
1205 	bool                            in_suspend;
1206 	bool				in_s3;
1207 	bool				in_s4;
1208 	bool				in_s0ix;
1209 	suspend_state_t			last_suspend_state;
1210 
1211 	enum pp_mp1_state               mp1_state;
1212 	struct amdgpu_doorbell_index doorbell_index;
1213 
1214 	struct mutex			notifier_lock;
1215 
1216 	int asic_reset_res;
1217 	struct work_struct		xgmi_reset_work;
1218 	struct list_head		reset_list;
1219 
1220 	long				gfx_timeout;
1221 	long				sdma_timeout;
1222 	long				video_timeout;
1223 	long				compute_timeout;
1224 	long				psp_timeout;
1225 
1226 	uint64_t			unique_id;
1227 	uint64_t	df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
1228 
1229 	/* enable runtime pm on the device */
1230 	bool                            in_runpm;
1231 	bool                            has_pr3;
1232 
1233 	bool                            ucode_sysfs_en;
1234 
1235 	struct amdgpu_fru_info		*fru_info;
1236 	atomic_t			throttling_logging_enabled;
1237 	struct ratelimit_state		throttling_logging_rs;
1238 	uint32_t                        ras_hw_enabled;
1239 	uint32_t                        ras_enabled;
1240 	bool                            ras_default_ecc_enabled;
1241 
1242 	bool                            no_hw_access;
1243 	struct pci_saved_state          *pci_state;
1244 	pci_channel_state_t		pci_channel_state;
1245 
1246 	struct amdgpu_pcie_reset_ctx	pcie_reset_ctx;
1247 
1248 	/* Track auto wait count on s_barrier settings */
1249 	bool				barrier_has_auto_waitcnt;
1250 
1251 	struct amdgpu_reset_control     *reset_cntl;
1252 	uint32_t                        ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
1253 
1254 	bool				ram_is_direct_mapped;
1255 
1256 	struct list_head                ras_list;
1257 
1258 	struct ip_discovery_top         *ip_top;
1259 
1260 	struct amdgpu_reset_domain	*reset_domain;
1261 
1262 	struct mutex			benchmark_mutex;
1263 
1264 	bool                            scpm_enabled;
1265 	uint32_t                        scpm_status;
1266 
1267 	struct work_struct		reset_work;
1268 
1269 	bool                            dc_enabled;
1270 	/* Mask of active clusters */
1271 	uint32_t			aid_mask;
1272 
1273 	/* Debug */
1274 	bool                            debug_vm;
1275 	bool                            debug_largebar;
1276 	bool                            debug_disable_soft_recovery;
1277 	bool                            debug_use_vram_fw_buf;
1278 	bool                            debug_enable_ras_aca;
1279 	bool                            debug_exp_resets;
1280 	bool                            debug_disable_gpu_ring_reset;
1281 	bool                            debug_vm_userptr;
1282 	bool                            debug_disable_ce_logs;
1283 
1284 	/* Protection for the following isolation structure */
1285 	struct mutex                    enforce_isolation_mutex;
1286 	enum amdgpu_enforce_isolation_mode	enforce_isolation[MAX_XCP];
1287 	struct amdgpu_isolation {
1288 		void			*owner;
1289 		struct dma_fence	*spearhead;
1290 		struct amdgpu_sync	active;
1291 		struct amdgpu_sync	prev;
1292 	} isolation[MAX_XCP];
1293 
1294 	struct amdgpu_init_level *init_lvl;
1295 
1296 	/* This flag is used to determine how VRAM allocations are handled for APUs
1297 	 * in KFD: VRAM or GTT.
1298 	 */
1299 	bool                            apu_prefer_gtt;
1300 
1301 	struct list_head		userq_mgr_list;
1302 	struct mutex                    userq_mutex;
1303 	bool                            userq_halt_for_enforce_isolation;
1304 };
1305 
1306 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
1307 					 uint8_t ip, uint8_t inst)
1308 {
1309 	/* This considers only major/minor/rev and ignores
1310 	 * subrevision/variant fields.
1311 	 */
1312 	return adev->ip_versions[ip][inst] & ~0xFFU;
1313 }
1314 
1315 static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
1316 					      uint8_t ip, uint8_t inst)
1317 {
1318 	/* This returns full version - major/minor/rev/variant/subrevision */
1319 	return adev->ip_versions[ip][inst];
1320 }
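
/* Typical use (sketch only): gate a code path on the discovered GC IP version
 * of instance 0, ignoring variant/sub-revision:
 *
 *	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
 *		// GFX11-or-newer specific handling
 *	}
 */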
1321 
1322 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
1323 {
1324 	return container_of(ddev, struct amdgpu_device, ddev);
1325 }
1326 
1327 static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
1328 {
1329 	return &adev->ddev;
1330 }
1331 
1332 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
1333 {
1334 	return container_of(bdev, struct amdgpu_device, mman.bdev);
1335 }
1336 
1337 static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
1338 {
1339 	return !!adev->aid_mask;
1340 }
1341 
1342 int amdgpu_device_init(struct amdgpu_device *adev,
1343 		       uint32_t flags);
1344 void amdgpu_device_fini_hw(struct amdgpu_device *adev);
1345 void amdgpu_device_fini_sw(struct amdgpu_device *adev);
1346 
1347 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
1348 
1349 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
1350 			     void *buf, size_t size, bool write);
1351 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
1352 				 void *buf, size_t size, bool write);
1353 
1354 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
1355 			       void *buf, size_t size, bool write);
1356 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
1357 			    uint32_t inst, uint32_t reg_addr, char reg_name[],
1358 			    uint32_t expected_value, uint32_t mask);
1359 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
1360 			    uint32_t reg, uint32_t acc_flags);
1361 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
1362 				    u64 reg_addr);
1363 uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
1364 				uint32_t reg, uint32_t acc_flags,
1365 				uint32_t xcc_id);
1366 void amdgpu_device_wreg(struct amdgpu_device *adev,
1367 			uint32_t reg, uint32_t v,
1368 			uint32_t acc_flags);
1369 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
1370 				     u64 reg_addr, u32 reg_data);
1371 void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
1372 			    uint32_t reg, uint32_t v,
1373 			    uint32_t acc_flags,
1374 			    uint32_t xcc_id);
1375 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
1376 			     uint32_t reg, uint32_t v, uint32_t xcc_id);
1377 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
1378 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
1379 
1380 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
1381 				u32 reg_addr);
1382 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
1383 				  u32 reg_addr);
1384 u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
1385 				  u64 reg_addr);
1386 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
1387 				 u32 reg_addr, u32 reg_data);
1388 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
1389 				   u32 reg_addr, u64 reg_data);
1390 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1391 				   u64 reg_addr, u64 reg_data);
1392 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
1393 bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
1394 				       enum amd_asic_type asic_type);
1395 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
1396 
1397 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
1398 
1399 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
1400 				 struct amdgpu_reset_context *reset_context);
1401 
1402 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
1403 			 struct amdgpu_reset_context *reset_context);
1404 
1405 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);
1406 
1407 int emu_soc_asic_init(struct amdgpu_device *adev);
1408 
1409 /*
1410  * Registers read & write functions.
1411  */
1412 #define AMDGPU_REGS_NO_KIQ    (1<<1)
1413 #define AMDGPU_REGS_RLC	(1<<2)
1414 
1415 #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
1416 #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
1417 
1418 #define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
1419 #define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)
1420 
1421 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
1422 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
1423 
1424 #define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
1425 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
1426 #define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
1427 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1428 #define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
1429 #define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
1430 #define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
1431 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
1432 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
1433 #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
1434 #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
1435 #define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
1436 #define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
1437 #define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
1438 #define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
1439 #define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
1440 #define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
1441 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
1442 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
1443 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
1444 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
1445 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
1446 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
1447 #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
1448 #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
1449 #define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
1450 #define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
1451 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
1452 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
1453 #define WREG32_P(reg, val, mask)				\
1454 	do {							\
1455 		uint32_t tmp_ = RREG32(reg);			\
1456 		tmp_ &= (mask);					\
1457 		tmp_ |= ((val) & ~(mask));			\
1458 		WREG32(reg, tmp_);				\
1459 	} while (0)
1460 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1461 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
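
/* Note on WREG32_P() (illustrative; mmFOO/FOO__EN_MASK are placeholder
 * names): the register bits selected by mask keep their current value while
 * the remaining bits are taken from val, e.g.
 *
 *	WREG32_P(mmFOO, 0, ~FOO__EN_MASK);	// clear only the EN field
 *
 * WREG32_AND() and WREG32_OR() wrap the same read-modify-write for the common
 * clear-bits and set-bits cases.
 */
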
1462 #define WREG32_PLL_P(reg, val, mask)				\
1463 	do {							\
1464 		uint32_t tmp_ = RREG32_PLL(reg);		\
1465 		tmp_ &= (mask);					\
1466 		tmp_ |= ((val) & ~(mask));			\
1467 		WREG32_PLL(reg, tmp_);				\
1468 	} while (0)
1469 
1470 #define WREG32_SMC_P(_Reg, _Val, _Mask)                         \
1471 	do {                                                    \
1472 		u32 tmp = RREG32_SMC(_Reg);                     \
1473 		tmp &= (_Mask);                                 \
1474 		tmp |= ((_Val) & ~(_Mask));                     \
1475 		WREG32_SMC(_Reg, tmp);                          \
1476 	} while (0)
1477 
1478 #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
1479 
1480 #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
1481 #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
1482 
1483 #define REG_SET_FIELD(orig_val, reg, field, field_val)			\
1484 	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
1485 	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
1486 
1487 #define REG_GET_FIELD(value, reg, field)				\
1488 	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
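
/* Common read-modify-write pattern built from these helpers (illustration
 * only; CP_RB0_CNTL/RB_BUFSZ is just an example register/field pair):
 *
 *	u32 tmp = RREG32(mmCP_RB0_CNTL);
 *
 *	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BUFSZ, order_base_2(size / 4));
 *	WREG32(mmCP_RB0_CNTL, tmp);
 *
 * REG_GET_FIELD(tmp, CP_RB0_CNTL, RB_BUFSZ) performs the inverse extraction.
 */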
1489 
1490 #define WREG32_FIELD(reg, field, val)	\
1491 	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1492 
1493 #define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
1494 	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1495 
1496 #define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
1497 /*
1498  * BIOS helpers.
1499  */
1500 #define RBIOS8(i) (adev->bios[i])
1501 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
1502 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
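
/* Example: with adev->bios[i] == 0x34 and adev->bios[i + 1] == 0x12,
 * RBIOS16(i) yields 0x1234, i.e. these helpers perform little-endian reads
 * from the VBIOS image.
 */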
1503 
1504 /*
1505  * ASICs macro.
1506  */
1507 #define amdgpu_asic_set_vga_state(adev, state) \
1508     ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
1509 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
1510 #define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
1511 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
1512 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
1513 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
1514 #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
1515 #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
1516 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
1517 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
1518 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
1519 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
1520 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
1521 #define amdgpu_asic_flush_hdp(adev, r) \
1522 	((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
1523 #define amdgpu_asic_invalidate_hdp(adev, r) \
1524 	((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
1525 	 ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
1526 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
1527 #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
1528 #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
1529 #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
1530 #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
1531 #define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
1532 #define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
1533 #define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
1534 	((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
1535 #define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))
1536 
1537 #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter))
1538 
1539 #define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
1540 #define for_each_inst(i, inst_mask)        \
1541 	for (i = ffs(inst_mask); i-- != 0; \
1542 	     i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
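
/* Illustration: for_each_inst() walks the set bits of an instance mask and
 * yields zero-based instance numbers (ffs() is 1-based, hence the i-- above);
 * e.g. a mask of 0xb visits i = 0, 1 and 3. Sketch, assuming a mask such as
 * adev->sdma.sdma_mask:
 *
 *	int i;
 *
 *	for_each_inst(i, adev->sdma.sdma_mask) {
 *		// per-instance setup for SDMA instance i
 *	}
 */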
1543 
1544 /* Common functions */
1545 bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
1546 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
1547 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
1548 			      struct amdgpu_job *job,
1549 			      struct amdgpu_reset_context *reset_context);
1550 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
1551 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
1552 bool amdgpu_device_need_post(struct amdgpu_device *adev);
1553 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
1554 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
1555 
1556 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
1557 				  u64 num_vis_bytes);
1558 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
1559 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1560 					     const u32 *registers,
1561 					     const u32 array_size);
1562 
1563 int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
1564 int amdgpu_device_link_reset(struct amdgpu_device *adev);
1565 bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
1566 bool amdgpu_device_supports_px(struct amdgpu_device *adev);
1567 bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
1568 bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
1569 int amdgpu_device_supports_baco(struct amdgpu_device *adev);
1570 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
1571 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
1572 				      struct amdgpu_device *peer_adev);
1573 int amdgpu_device_baco_enter(struct amdgpu_device *adev);
1574 int amdgpu_device_baco_exit(struct amdgpu_device *adev);
1575 
1576 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
1577 		struct amdgpu_ring *ring);
1578 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
1579 		struct amdgpu_ring *ring);
1580 
1581 void amdgpu_device_halt(struct amdgpu_device *adev);
1582 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
1583 				u32 reg);
1584 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
1585 				u32 reg, u32 v);
1586 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
1587 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
1588 					    struct dma_fence *gang);
1589 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
1590 						  struct amdgpu_ring *ring,
1591 						  struct amdgpu_job *job);
1592 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
1593 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
1594 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
1595 
1596 /* atpx handler */
1597 #if defined(CONFIG_VGA_SWITCHEROO)
1598 void amdgpu_register_atpx_handler(void);
1599 void amdgpu_unregister_atpx_handler(void);
1600 bool amdgpu_has_atpx_dgpu_power_cntl(void);
1601 bool amdgpu_is_atpx_hybrid(void);
1602 bool amdgpu_has_atpx(void);
1603 #else
1604 static inline void amdgpu_register_atpx_handler(void) {}
1605 static inline void amdgpu_unregister_atpx_handler(void) {}
1606 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
1607 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
1608 static inline bool amdgpu_has_atpx(void) { return false; }
1609 #endif
1610 
1611 /*
1612  * KMS
1613  */
1614 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
1615 extern const int amdgpu_max_kms_ioctl;
1616 
1617 int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
1618 void amdgpu_driver_unload_kms(struct drm_device *dev);
1619 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
1620 void amdgpu_driver_postclose_kms(struct drm_device *dev,
1621 				 struct drm_file *file_priv);
1622 void amdgpu_driver_release_kms(struct drm_device *dev);
1623 
1624 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
1625 int amdgpu_device_prepare(struct drm_device *dev);
1626 void amdgpu_device_complete(struct drm_device *dev);
1627 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
1628 int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
1629 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
1630 int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
1631 void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
1632 int amdgpu_info_ioctl(struct drm_device *dev, void *data,
1633 		      struct drm_file *filp);
1634 
1635 /*
1636  * functions used by amdgpu_encoder.c
1637  */
struct amdgpu_afmt_acr {
	u32 clock;	/* pixel clock the values below were computed for */

	/* HDMI Audio Clock Regeneration N and CTS values per sample rate */
	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};
1651 
1652 struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
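/*
 * Minimal sketch (illustrative; write_acr_regs() is hypothetical): look up
 * the ACR parameters for a display mode and program the pair matching the
 * audio sample rate, here 48 kHz:
 *
 *	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(mode->clock);
 *
 *	write_acr_regs(encoder, acr.n_48khz, acr.cts_48khz);
 */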
1653 
1654 /* amdgpu_acpi.c */
1655 
struct amdgpu_numa_info {
	uint64_t size;	/* size of the memory region in this NUMA node */
	int pxm;	/* ACPI proximity domain */
	int nid;	/* Linux NUMA node id the proximity domain maps to */
};
1661 
1662 /* ATCS Device/Driver State */
1663 #define AMDGPU_ATCS_PSC_DEV_STATE_D0		0
1664 #define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT	3
1665 #define AMDGPU_ATCS_PSC_DRV_STATE_OPR		0
1666 #define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR	1
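/*
 * Minimal sketch (illustrative, error handling trimmed): report to the
 * platform over ATCS that the dGPU is headed to D3hot and the driver is no
 * longer operational, e.g. on a runtime-suspend style path:
 *
 *	r = amdgpu_acpi_power_shift_control(adev,
 *					    AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT,
 *					    AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
 */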
1667 
1668 #if defined(CONFIG_ACPI)
1669 int amdgpu_acpi_init(struct amdgpu_device *adev);
1670 void amdgpu_acpi_fini(struct amdgpu_device *adev);
1671 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
1672 bool amdgpu_acpi_is_power_shift_control_supported(void);
1673 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
1674 						u8 perf_req, bool advertise);
1675 int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1676 				    u8 dev_state, bool drv_state);
1677 int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
1678 				   enum amdgpu_ss ss_state);
1679 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
1680 int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
1681 			     u64 *tmr_size);
1682 int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
1683 			     struct amdgpu_numa_info *numa_info);
1684 
1685 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
1686 bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
1687 void amdgpu_acpi_detect(void);
1688 void amdgpu_acpi_release(void);
1689 #else
1690 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
1691 static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
1692 					   u64 *tmr_offset, u64 *tmr_size)
1693 {
1694 	return -EINVAL;
1695 }
1696 static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
1697 					   int xcc_id,
1698 					   struct amdgpu_numa_info *numa_info)
1699 {
1700 	return -EINVAL;
1701 }
1702 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
1703 static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
1704 static inline void amdgpu_acpi_detect(void) { }
1705 static inline void amdgpu_acpi_release(void) { }
1706 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
1707 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1708 						  u8 dev_state, bool drv_state) { return 0; }
1709 static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
1710 						 enum amdgpu_ss ss_state)
1711 {
1712 	return 0;
1713 }
1714 static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
1715 #endif
1716 
1717 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
1718 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
1719 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
1720 #else
1721 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
1722 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
1723 #endif
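/*
 * Minimal sketch (illustrative; the in_s0ix/in_s3 booleans are assumed
 * device-state flags): the system-suspend path distinguishes S0ix from S3 so
 * the IP suspend sequence can be tailored accordingly:
 *
 *	if (amdgpu_acpi_is_s0ix_active(adev))
 *		adev->in_s0ix = true;
 *	else if (amdgpu_acpi_is_s3_active(adev))
 *		adev->in_s3 = true;
 */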
1724 
1725 #if defined(CONFIG_DRM_AMD_ISP)
1726 int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
1727 #endif
1728 
1729 void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
1730 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
1731 
1732 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
1733 					   pci_channel_state_t state);
1734 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
1735 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
1736 void amdgpu_pci_resume(struct pci_dev *pdev);
1737 
1738 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
1739 bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
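/*
 * Minimal sketch (illustrative only): PCI config space is usually cached
 * before an ASIC reset and restored once the reset completes, so the device
 * comes back with its BARs and command register intact:
 *
 *	amdgpu_device_cache_pci_state(adev->pdev);
 *	... perform the ASIC reset ...
 *	amdgpu_device_load_pci_state(adev->pdev);
 */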
1740 
1741 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);
1742 
1743 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1744 			       enum amd_clockgating_state state);
1745 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
1746 			       enum amd_powergating_state state);
1747 
1748 static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
1749 {
1750 	return amdgpu_gpu_recovery != 0 &&
1751 		adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
1752 		adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
1753 		adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
1754 		adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
1755 }
1756 
1757 #include "amdgpu_object.h"
1758 
/* TMZ (Trusted Memory Zone) provides encrypted VRAM for protected buffers */
static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
{
	return adev->gmc.tmz_enabled;
}
1763 
1764 int amdgpu_in_reset(struct amdgpu_device *adev);
1765 
1766 extern const struct attribute_group amdgpu_vram_mgr_attr_group;
1767 extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
1768 extern const struct attribute_group amdgpu_flash_attr_group;
1769 
1770 void amdgpu_set_init_level(struct amdgpu_device *adev,
1771 			   enum amdgpu_init_lvl_id lvl);
1772 
/* Check that the device is still reachable on the bus (config reads are not all ones) */
static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
{
	u32 status;
	int r;

	r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
	if (r || PCI_POSSIBLE_ERROR(status)) {
		dev_err(adev->dev, "device lost from bus!\n");
		return -ENODEV;
	}

	return 0;
}
1786 
1787 #endif
1788