/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_SMI_EVENTS_H_INCLUDED
#define KFD_SMI_EVENTS_H_INCLUDED
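
/*
 * In-kernel hooks used by KFD and the SVM/migration code to post System
 * Management Interface (SMI) events. Userspace obtains a per-node event
 * file descriptor through the SMI events ioctl, enables an event mask on
 * it and then reads the events as formatted text records (see
 * kfd_smi_events.c and the KFD_SMI_EVENT_* definitions in
 * include/uapi/linux/kfd_ioctl.h).
 */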

#include <linux/ktime.h>
#include <linux/types.h>

struct amdgpu_reset_context;
struct kfd_node;
struct mm_struct;

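/*
 * Create an anonymous SMI event file for @dev and return its file
 * descriptor through @fd; this is what the SMI events ioctl hands back
 * to userspace.
 */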
int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd);
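
/* Report a GPU VM fault hitting the process identified by @pasid. */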
void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid);
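
/*
 * Report thermal throttling on @dev; @throttle_bitmask encodes the
 * throttler reasons that are currently active.
 */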
void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
					     uint64_t throttle_bitmask);
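
/*
 * Report a GPU reset; a pre- or post-reset event is posted depending on
 * @post_reset, and @reset_context supplies the reset cause included in
 * the event.
 */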
void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset,
				    struct amdgpu_reset_context *reset_context);
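
/*
 * Report start and end of recoverable (SVM) GPU page-fault handling at
 * @address for process @pid. @ts is the time the fault was received and
 * @migration tells whether the fault was serviced by migrating the range.
 */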
void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
				    unsigned long address, bool write_fault,
				    ktime_t ts);
void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
				  unsigned long address, bool migration);
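
/*
 * Report start and end of an SVM range migration of [@start, @end] for
 * process @pid. @from and @to identify the source and destination memory
 * locations and @trigger records what caused the migration.
 */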
void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
			     unsigned long start, unsigned long end,
			     uint32_t from, uint32_t to,
			     uint32_t prefetch_loc, uint32_t preferred_loc,
			     uint32_t trigger);
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
			     unsigned long start, unsigned long end,
			     uint32_t from, uint32_t to, uint32_t trigger);
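
/*
 * Report user queue eviction and restore for process @pid. @trigger
 * records the eviction cause; the _rescheduled variant reports that a
 * failed restore was deferred and will be retried.
 */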
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
				  uint32_t trigger);
void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid);
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
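
/*
 * Report that the range [@address, @last] was unmapped from the GPU for
 * process @pid; @trigger records why (e.g. an MMU notifier invalidation).
 */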
void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
				  unsigned long address, unsigned long last,
				  uint32_t trigger);
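
/*
 * Usage sketch (illustrative only, not part of the kernel build): how a
 * userspace client might obtain and enable the SMI event stream that the
 * helpers above feed. It assumes the uAPI in include/uapi/linux/kfd_ioctl.h
 * (AMDKFD_IOC_SMI_EVENTS, struct kfd_ioctl_smi_events_args, the
 * KFD_SMI_EVENT_* indices and KFD_SMI_EVENT_MASK_FROM_INDEX()); the gpuid
 * comes from the KFD topology sysfs, and open_smi_events() is a made-up
 * name for the example.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/kfd_ioctl.h>
 *
 *	static int open_smi_events(uint32_t gpuid)
 *	{
 *		struct kfd_ioctl_smi_events_args args = { .gpuid = gpuid };
 *		uint64_t mask;
 *		int kfd;
 *
 *		kfd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *		if (kfd < 0)
 *			return -1;
 *		if (ioctl(kfd, AMDKFD_IOC_SMI_EVENTS, &args) < 0) {
 *			close(kfd);
 *			return -1;
 *		}
 *
 *		// Enable the events of interest by writing a 64-bit mask.
 *		mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT) |
 *		       KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_THERMAL_THROTTLE);
 *		write(args.anon_fd, &mask, sizeof(mask));
 *
 *		// Formatted event records can now be read() from anon_fd;
 *		// /dev/kfd is deliberately kept open here.
 *		return args.anon_fd;
 *	}
 */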
#endif