// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15

static struct host1x_syncpt_base *
host1x_syncpt_base_request(struct host1x *host)
{
	struct host1x_syncpt_base *bases = host->bases;
	unsigned int i;

	for (i = 0; i < host->info->nb_bases; i++)
		if (!bases[i].requested)
			break;

	if (i >= host->info->nb_bases)
		return NULL;

	bases[i].requested = true;
	return &bases[i];
}

static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
{
	if (base)
		base->requested = false;
}

/**
 * host1x_syncpt_alloc() - allocate a syncpoint
 * @host: host1x device data
 * @flags: bitfield of HOST1X_SYNCPT_* flags
 * @name: name for the syncpoint for use in debug prints
 *
 * Allocates a hardware syncpoint for the caller's use. The caller then has
 * the sole authority to mutate the syncpoint's value until it is freed again.
 *
 * If no free syncpoints are available, or a NULL name was specified, returns
 * NULL.
 */
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name)
{
	struct host1x_syncpt *sp = host->syncpt;
	char *full_name;
	unsigned int i;

	if (!name)
		return NULL;

	mutex_lock(&host->syncpt_mutex);

	for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++)
		;

	if (i >= host->info->nb_pts)
		goto unlock;

	if (flags & HOST1X_SYNCPT_HAS_BASE) {
		sp->base = host1x_syncpt_base_request(host);
		if (!sp->base)
			goto unlock;
	}

	full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name);
	if (!full_name)
		goto free_base;

	sp->name = full_name;

	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
		sp->client_managed = true;
	else
		sp->client_managed = false;

	kref_init(&sp->ref);

	mutex_unlock(&host->syncpt_mutex);
	return sp;

free_base:
	host1x_syncpt_base_free(sp->base);
	sp->base = NULL;
unlock:
	mutex_unlock(&host->syncpt_mutex);
	return NULL;
}
EXPORT_SYMBOL(host1x_syncpt_alloc);

/**
 * host1x_syncpt_id() - retrieve syncpoint ID
 * @sp: host1x syncpoint
 *
 * Given a pointer to a struct host1x_syncpt, retrieves its ID. This ID is
 * often used as a value to program into registers that control how hardware
 * blocks interact with syncpoints.
 */
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}
EXPORT_SYMBOL(host1x_syncpt_id);

/**
 * host1x_syncpt_incr_max() - update the value sent to hardware
 * @sp: host1x syncpoint
 * @incrs: number of increments
 *
 * Return: the new maximum value of the syncpoint (the old value plus @incrs).
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_incr_max);
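
/*
 * Illustrative usage sketch, not part of the driver: how a caller might
 * allocate a client-managed syncpoint and release it once done. The name
 * and the error code returned on failure are hypothetical.
 *
 *	struct host1x_syncpt *sp;
 *
 *	sp = host1x_syncpt_alloc(host, HOST1X_SYNCPT_CLIENT_MANAGED,
 *				 "my-engine");
 *	if (!sp)
 *		return -EBUSY;
 *
 *	(program hardware using host1x_syncpt_id(sp) here)
 *
 *	host1x_syncpt_put(sp);
 */
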
/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		/*
		 * Unassign syncpt from channels for purposes of Tegra186
		 * syncpoint protection. This prevents any channel from
		 * accessing it until it is reassigned.
		 */
		host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL);
		host1x_hw_syncpt_restore(host, sp_base + i);
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_restore_wait_base(host, sp_base + i);

	host1x_hw_syncpt_enable_protection(host);

	wmb();
}

/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		if (host1x_syncpt_client_managed(sp_base + i))
			host1x_hw_syncpt_load(host, sp_base + i);
		else
			WARN_ON(!host1x_syncpt_idle(sp_base + i));
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}

/*
 * Update the cached syncpoint value by reading a new value from the
 * hardware register.
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
	u32 val;

	val = host1x_hw_syncpt_load(sp->host, sp);
	trace_host1x_syncpt_load_min(sp->id, val);

	return val;
}

/*
 * Get the current syncpoint base.
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
	host1x_hw_syncpt_load_wait_base(sp->host, sp);

	return sp->base_val;
}

/**
 * host1x_syncpt_incr() - increment syncpoint value from CPU, updating cache
 * @sp: host1x syncpoint
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
EXPORT_SYMBOL(host1x_syncpt_incr);
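
/*
 * Illustrative usage sketch, not part of the driver: a CPU-side signal on a
 * client-managed syncpoint. Reserving the increment in the cached maximum
 * first yields a threshold that other threads can then wait for; this
 * ordering is an assumption for illustration.
 *
 *	u32 thresh = host1x_syncpt_incr_max(sp, 1);
 *	int err = host1x_syncpt_incr(sp);
 *
 *	if (err < 0)
 *		return err;
 *
 *	(hand thresh to a waiter)
 */
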
/**
 * host1x_syncpt_wait() - wait for a syncpoint to reach a given value
 * @sp: host1x syncpoint
 * @thresh: threshold
 * @timeout: maximum time to wait for the syncpoint to reach the given value;
 *	a negative value means wait indefinitely, zero means do not wait
 * @value: return location for the syncpoint value
 *
 * Return: 0 if the syncpoint reached the threshold, -EAGAIN if it had not
 * done so within the timeout, or another negative error code on failure.
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value)
{
	struct dma_fence *fence;
	long wait_err;

	host1x_hw_syncpt_load(sp->host, sp);

	if (value)
		*value = host1x_syncpt_load(sp);

	if (host1x_syncpt_is_expired(sp, thresh))
		return 0;

	if (timeout < 0)
		timeout = LONG_MAX;
	else if (timeout == 0)
		return -EAGAIN;

	fence = host1x_fence_create(sp, thresh, false);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	wait_err = dma_fence_wait_timeout(fence, true, timeout);
	if (wait_err == 0)
		host1x_fence_cancel(fence);
	dma_fence_put(fence);

	if (value)
		*value = host1x_syncpt_load(sp);

	/*
	 * Don't rely on dma_fence_wait_timeout return value,
	 * since it returns zero both on timeout and if the
	 * wait completed with 0 jiffies left.
	 */
	host1x_hw_syncpt_load(sp->host, sp);
	if (wait_err == 0 && !host1x_syncpt_is_expired(sp, thresh))
		return -EAGAIN;
	else if (wait_err < 0)
		return wait_err;
	else
		return 0;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
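
/*
 * Illustrative usage sketch, not part of the driver: waiting up to one
 * second (an arbitrary example value) for a threshold obtained earlier.
 *
 *	u32 value;
 *	int err;
 *
 *	err = host1x_syncpt_wait(sp, thresh, msecs_to_jiffies(1000), &value);
 *	if (err == -EAGAIN)
 *		(timed out before reaching thresh)
 *	else if (err < 0)
 *		(interrupted or other failure)
 */
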
/*
 * Returns true if the syncpoint has expired, false if we may need to wait.
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;

	smp_rmb();

	current_val = (u32)atomic_read(&sp->min_val);

	return ((current_val - thresh) & 0x80000000U) == 0U;
}

int host1x_syncpt_init(struct host1x *host)
{
	struct host1x_syncpt_base *bases;
	struct host1x_syncpt *syncpt;
	unsigned int i;

	syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
			      GFP_KERNEL);
	if (!syncpt)
		return -ENOMEM;

	bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
			     GFP_KERNEL);
	if (!bases)
		return -ENOMEM;

	for (i = 0; i < host->info->nb_pts; i++) {
		syncpt[i].id = i;
		syncpt[i].host = host;
	}

	for (i = 0; i < host->info->nb_bases; i++)
		bases[i].id = i;

	mutex_init(&host->syncpt_mutex);
	host->syncpt = syncpt;
	host->bases = bases;

	/* Allocate sync point to use for clearing waits for expired fences */
	host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
	if (!host->nop_sp)
		return -ENOMEM;

	if (host->info->reserve_vblank_syncpts) {
		/*
		 * Hold a reference on the VBLANK syncpoints so they are not
		 * handed out while the boot chain may still be incrementing
		 * them; see host1x_syncpt_release_vblank_reservation().
		 */
		kref_init(&host->syncpt[26].ref);
		kref_init(&host->syncpt[27].ref);
	}

	return 0;
}

/**
 * host1x_syncpt_request() - request a syncpoint
 * @client: client requesting the syncpoint
 * @flags: flags
 *
 * host1x client drivers can use this function to allocate a syncpoint for
 * subsequent use. A syncpoint returned by this function will be reserved for
 * use by the client exclusively. When no longer using a syncpoint, a host1x
 * client driver needs to release it using host1x_syncpt_put().
 */
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags)
{
	struct host1x *host = dev_get_drvdata(client->host->parent);

	return host1x_syncpt_alloc(host, flags, dev_name(client->dev));
}
EXPORT_SYMBOL(host1x_syncpt_request);

static void syncpt_release(struct kref *ref)
{
	struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref);

	atomic_set(&sp->max_val, host1x_syncpt_read(sp));

	sp->locked = false;

	host1x_syncpt_base_free(sp->base);
	kfree(sp->name);
	sp->base = NULL;
	sp->name = NULL;
	sp->client_managed = false;

	mutex_unlock(&sp->host->syncpt_mutex);
}

/**
 * host1x_syncpt_put() - free a requested syncpoint
 * @sp: host1x syncpoint
 *
 * Release a syncpoint previously allocated using host1x_syncpt_request(). A
 * host1x client driver should call this when the syncpoint is no longer in
 * use.
 */
void host1x_syncpt_put(struct host1x_syncpt *sp)
{
	if (!sp)
		return;

	kref_put_mutex(&sp->ref, syncpt_release, &sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_put);

void host1x_syncpt_deinit(struct host1x *host)
{
	struct host1x_syncpt *sp = host->syncpt;
	unsigned int i;

	for (i = 0; i < host->info->nb_pts; i++, sp++)
		kfree(sp->name);
}

/**
 * host1x_syncpt_read_max() - read maximum syncpoint value
 * @sp: host1x syncpoint
 *
 * The maximum syncpoint value indicates how many operations are queued,
 * either in a channel or in a software thread.
 */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);

/**
 * host1x_syncpt_read_min() - read minimum syncpoint value
 * @sp: host1x syncpoint
 *
 * The minimum syncpoint value is a shadow of the current syncpoint value in
 * hardware.
 */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);

/**
 * host1x_syncpt_read() - read the current syncpoint value
 * @sp: host1x syncpoint
 */
u32 host1x_syncpt_read(struct host1x_syncpt *sp)
{
	return host1x_syncpt_load(sp);
}
EXPORT_SYMBOL(host1x_syncpt_read);

unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}

unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}

unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}

/**
 * host1x_syncpt_get_by_id() - obtain a syncpoint by ID
 * @host: host1x controller
 * @id: syncpoint ID
 */
struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host,
					      unsigned int id)
{
	if (id >= host->info->nb_pts)
		return NULL;

	if (kref_get_unless_zero(&host->syncpt[id].ref))
		return &host->syncpt[id];
	else
		return NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_by_id);

/**
 * host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't
 *	increase the refcount.
 * @host: host1x controller
 * @id: syncpoint ID
 */
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host,
						    unsigned int id)
{
	if (id >= host->info->nb_pts)
		return NULL;

	return &host->syncpt[id];
}
EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref);

/**
 * host1x_syncpt_get() - increment syncpoint refcount
 * @sp: syncpoint
 */
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp)
{
	kref_get(&sp->ref);

	return sp;
}
EXPORT_SYMBOL(host1x_syncpt_get);
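
/*
 * Illustrative usage sketch, not part of the driver: looking up a syncpoint
 * by ID while holding a reference for the duration of its use.
 *
 *	struct host1x_syncpt *sp;
 *
 *	sp = host1x_syncpt_get_by_id(host, id);
 *	if (!sp)
 *		return -EINVAL;
 *
 *	(read or wait on the syncpoint)
 *
 *	host1x_syncpt_put(sp);
 */
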
/**
 * host1x_syncpt_get_base() - obtain the wait base associated with a syncpoint
 * @sp: host1x syncpoint
 */
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
	return sp ? sp->base : NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_base);

/**
 * host1x_syncpt_base_id() - retrieve the ID of a syncpoint wait base
 * @base: host1x syncpoint wait base
 */
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
	return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);

static void do_nothing(struct kref *ref)
{
}

/**
 * host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint
 *	available for allocation
 *
 * @client: host1x bus client
 * @syncpt_id: syncpoint ID to make available
 *
 * Makes the VBLANK<i> syncpoint available for allocation if it was
 * reserved at initialization time. This should be called by the display
 * driver after it has ensured that any VBLANK increment programming configured
 * by the boot chain has been disabled.
 */
void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id)
{
	struct host1x *host = dev_get_drvdata(client->host->parent);

	if (!host->info->reserve_vblank_syncpts)
		return;

	kref_put(&host->syncpt[syncpt_id].ref, do_nothing);
}
EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);
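
/*
 * Illustrative usage sketch, not part of the driver: a display driver
 * releasing the reservation for a VBLANK syncpoint after making sure the
 * boot chain no longer increments it. The syncpoint ID shown is a
 * hypothetical example.
 *
 *	(disable boot-chain VBLANK increment programming first)
 *
 *	host1x_syncpt_release_vblank_reservation(client, 26);
 */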