/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_execbuf_util.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

/*
 * Undo the reservations held on the list: return any buffers taken off the
 * LRU back to it, clear the reservation and wake up waiters.
 * Caller must hold the global LRU lock.
 */
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                if (entry->removed) {
                        ttm_bo_add_to_lru(bo);
                        entry->removed = false;
                }
                entry->reserved = false;
                atomic_set(&bo->reserved, 0);
                wakeup(bo);
        }
}

/*
 * Take every reserved buffer on the list off the LRU, recording the
 * reference count to drop later.  Caller must hold the global LRU lock.
 */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                if (!entry->removed) {
                        entry->put_count = ttm_bo_del_from_lru(bo);
                        entry->removed = true;
                }
        }
}

/*
 * Drop the LRU references accumulated by ttm_eu_del_from_lru_locked().
 */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (entry->put_count) {
                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
                        entry->put_count = 0;
                }
        }
}

/*
 * Pull the list off the LRU and wait for @bo to become unreserved; back
 * off the list's reservations if the wait fails.
 * Caller must hold the global LRU lock.
 */
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
                                         struct ttm_buffer_object *bo)
{
        int ret;

        ttm_eu_del_from_lru_locked(list);
        ret = ttm_bo_wait_unreserved_locked(bo, true);
        if (unlikely(ret != 0))
                ttm_eu_backoff_reservation_locked(list);
        return ret;
}

void ttm_eu_backoff_reservation(struct list_head *list)
{
        struct ttm_validate_buffer *entry;
        struct ttm_bo_global *glob;

        if (list_empty(list))
                return;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
        mtx_lock(&glob->lru_lock);
        ttm_eu_backoff_reservation_locked(list);
        mtx_unlock(&glob->lru_lock);
}

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct list_head *list)
{
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;
        uint32_t val_seq;

        if (list_empty(list))
                return 0;

        list_for_each_entry(entry, list, head) {
                entry->reserved = false;
                entry->put_count = 0;
                entry->removed = false;
        }

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        mtx_lock(&glob->lru_lock);
retry_locked:
        /* Each reservation pass uses a fresh validation sequence number. */
        val_seq = entry->bo->bdev->val_seq++;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
                ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                        /*
                         * Reserved by someone else; wait for it to become
                         * unreserved and retry this buffer.
                         */
                        ret = ttm_eu_wait_unreserved_locked(list, bo);
                        if (unlikely(ret != 0)) {
                                mtx_unlock(&glob->lru_lock);
                                ttm_eu_list_ref_sub(list);
                                return ret;
                        }
                        goto retry_this_bo;
                case -EAGAIN:
                        /*
                         * Deadlock avoidance: we hold the newer sequence, so
                         * back off everything, wait, and restart the whole
                         * list with a new sequence.
                         */
                        ttm_eu_backoff_reservation_locked(list);
                        ttm_eu_list_ref_sub(list);
                        ret = ttm_bo_wait_unreserved_locked(bo, true);
                        if (unlikely(ret != 0)) {
                                mtx_unlock(&glob->lru_lock);
                                return ret;
                        }
                        goto retry_locked;
                default:
                        ttm_eu_backoff_reservation_locked(list);
                        mtx_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
                        return ret;
                }

                entry->reserved = true;
                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        ttm_eu_backoff_reservation_locked(list);
                        mtx_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
                        return -EBUSY;
                }
        }

        ttm_eu_del_from_lru_locked(list);
        mtx_unlock(&glob->lru_lock);
        ttm_eu_list_ref_sub(list);

        return 0;
}

/*
 * Attach @sync_obj as the new fence on every buffer in the list and
 * unreserve the buffers.  The previous fences are unreferenced after the
 * locks have been dropped.
 */
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;

        if (list_empty(list))
                return;

        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        mtx_lock(&glob->lru_lock);
        mtx_lock(&bdev->fence_lock);

        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                entry->old_sync_obj = bo->sync_obj;
                bo->sync_obj = driver->sync_obj_ref(sync_obj);
                ttm_bo_unreserve_locked(bo);
                entry->reserved = false;
        }
        mtx_unlock(&bdev->fence_lock);
        mtx_unlock(&glob->lru_lock);

        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
                        driver->sync_obj_unref(&entry->old_sync_obj);
        }
}
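
/*
 * A minimal driver-side usage sketch of the reserve/validate/fence cycle
 * implemented above, kept under "#if 0" since it is illustrative only.
 * Only the ttm_eu_*() calls and struct ttm_validate_buffer come from this
 * file; my_driver_submit(), my_driver_create_fence() and the placement
 * argument are hypothetical placeholders, and the ttm_bo_validate() and
 * malloc() signatures are assumed from the surrounding TTM and kernel
 * APIs and may differ between versions.
 */
#if 0
static int
my_driver_submit(struct ttm_buffer_object **bos, int nbufs,
    struct ttm_placement *placement)
{
        struct list_head val_list;
        struct ttm_validate_buffer *entries;
        void *fence;
        int i, ret;

        entries = malloc(nbufs * sizeof(*entries), M_TEMP, M_WAITOK | M_ZERO);
        INIT_LIST_HEAD(&val_list);
        for (i = 0; i < nbufs; i++) {
                entries[i].bo = bos[i];
                list_add_tail(&entries[i].head, &val_list);
        }

        /*
         * Reserve every buffer on the list; on failure the function has
         * already backed off its own reservations.
         */
        ret = ttm_eu_reserve_buffers(&val_list);
        if (ret != 0)
                goto out;

        /* Move each buffer to a GPU-visible placement while reserved. */
        for (i = 0; i < nbufs; i++) {
                ret = ttm_bo_validate(bos[i], placement, true, false);
                if (ret != 0) {
                        /* Drop all reservations taken above. */
                        ttm_eu_backoff_reservation(&val_list);
                        goto out;
                }
        }

        /* Attach one fence to every buffer and unreserve them. */
        fence = my_driver_create_fence();
        ttm_eu_fence_buffer_objects(&val_list, fence);
out:
        free(entries, M_TEMP);
        return (ret);
}
#endif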