// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15

static struct host1x_syncpt_base *
host1x_syncpt_base_request(struct host1x *host)
{
	struct host1x_syncpt_base *bases = host->bases;
	unsigned int i;

	for (i = 0; i < host->info->nb_bases; i++)
		if (!bases[i].requested)
			break;

	if (i >= host->info->nb_bases)
		return NULL;

	bases[i].requested = true;
	return &bases[i];
}

static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
{
	if (base)
		base->requested = false;
}

/**
 * host1x_syncpt_alloc() - allocate a syncpoint
 * @host: host1x device data
 * @flags: bitfield of HOST1X_SYNCPT_* flags
 * @name: name for the syncpoint for use in debug prints
 *
 * Allocates a hardware syncpoint for the caller's use. The caller then has
 * the sole authority to mutate the syncpoint's value until it is freed again.
 *
 * If no free syncpoints are available, or a NULL name was specified, returns
 * NULL.
 */
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name)
{
	struct host1x_syncpt *sp = host->syncpt;
	char *full_name;
	unsigned int i;

	if (!name)
		return NULL;

	mutex_lock(&host->syncpt_mutex);

	for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++)
		;

	if (i >= host->info->nb_pts)
		goto unlock;

	if (flags & HOST1X_SYNCPT_HAS_BASE) {
		sp->base = host1x_syncpt_base_request(host);
		if (!sp->base)
			goto unlock;
	}

	full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name);
	if (!full_name)
		goto free_base;

	sp->name = full_name;

	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
		sp->client_managed = true;
	else
		sp->client_managed = false;

	kref_init(&sp->ref);

	mutex_unlock(&host->syncpt_mutex);
	return sp;

free_base:
	host1x_syncpt_base_free(sp->base);
	sp->base = NULL;
unlock:
	mutex_unlock(&host->syncpt_mutex);
	return NULL;
}
EXPORT_SYMBOL(host1x_syncpt_alloc);

/**
 * host1x_syncpt_id() - retrieve syncpoint ID
 * @sp: host1x syncpoint
 *
 * Given a pointer to a struct host1x_syncpt, retrieves its ID. This ID is
 * often used as a value to program into registers that control how hardware
 * blocks interact with syncpoints.
 */
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}
EXPORT_SYMBOL(host1x_syncpt_id);

/**
 * host1x_syncpt_incr_max() - update the value sent to hardware
 * @sp: host1x syncpoint
 * @incrs: number of increments
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_incr_max);
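
/*
 * Illustrative sketch (not part of this file): one plausible pattern for
 * pairing host1x_syncpt_incr_max() with a wait, where increments are
 * reserved up front and the returned value is used as the wait threshold.
 * my_push_job() and job->num_incrs are hypothetical names.
 *
 *	u32 value;
 *	u32 fence = host1x_syncpt_incr_max(sp, job->num_incrs);
 *
 *	my_push_job(job);	// queues work that performs job->num_incrs
 *				// syncpoint increments on completion
 *	err = host1x_syncpt_wait(sp, fence, msecs_to_jiffies(100), &value);
 */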

/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		/*
		 * Unassign syncpt from channels for purposes of Tegra186
		 * syncpoint protection. This prevents any channel from
		 * accessing it until it is reassigned.
		 */
		host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL);
		host1x_hw_syncpt_restore(host, sp_base + i);
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_restore_wait_base(host, sp_base + i);

	host1x_hw_syncpt_enable_protection(host);

	wmb();
}

/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		if (host1x_syncpt_client_managed(sp_base + i))
			host1x_hw_syncpt_load(host, sp_base + i);
		else
			WARN_ON(!host1x_syncpt_idle(sp_base + i));
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}

/*
 * Updates the cached syncpoint value by reading a new value from the hardware
 * register.
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
	u32 val;

	val = host1x_hw_syncpt_load(sp->host, sp);
	trace_host1x_syncpt_load_min(sp->id, val);

	return val;
}

/*
 * Get the current syncpoint base.
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
	host1x_hw_syncpt_load_wait_base(sp->host, sp);

	return sp->base_val;
}

/**
 * host1x_syncpt_incr() - increment syncpoint value from CPU, updating cache
 * @sp: host1x syncpoint
 */
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
EXPORT_SYMBOL(host1x_syncpt_incr);

/**
 * host1x_syncpt_wait() - wait for a syncpoint to reach a given value
 * @sp: host1x syncpoint
 * @thresh: threshold
 * @timeout: maximum time to wait for the syncpoint to reach the given value
 * @value: return location for the syncpoint value
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value)
{
	struct dma_fence *fence;
	long wait_err;

	host1x_hw_syncpt_load(sp->host, sp);

	if (value)
		*value = host1x_syncpt_load(sp);

	if (host1x_syncpt_is_expired(sp, thresh))
		return 0;

	if (timeout < 0)
		timeout = LONG_MAX;
	else if (timeout == 0)
		return -EAGAIN;

	fence = host1x_fence_create(sp, thresh, false);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	wait_err = dma_fence_wait_timeout(fence, true, timeout);
	if (wait_err == 0)
		host1x_fence_cancel(fence);
	dma_fence_put(fence);

	if (value)
		*value = host1x_syncpt_load(sp);

	if (wait_err == 0)
		return -EAGAIN;
	else if (wait_err < 0)
		return wait_err;
	else
		return 0;
}
EXPORT_SYMBOL(host1x_syncpt_wait);

/*
 * Returns true if the syncpoint is expired, false if we may need to wait.
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;

	smp_rmb();

	current_val = (u32)atomic_read(&sp->min_val);

	return ((current_val - thresh) & 0x80000000U) == 0U;
}
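
/*
 * Worked example for the wrapping check in host1x_syncpt_is_expired()
 * above (illustrative values, not from hardware):
 *
 *	thresh      = 0xfffffffe;
 *	current_val = 0x00000001;		// counter wrapped past thresh
 *	current_val - thresh == 0x00000003;	// sign bit clear -> expired
 *
 * The unsigned subtraction wraps modulo 2^32, so the threshold counts as
 * reached even across a 32-bit counter wrap. The comparison stays correct
 * as long as the shadow value trails the threshold by fewer than 1 << 31
 * increments.
 */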

int host1x_syncpt_init(struct host1x *host)
{
	struct host1x_syncpt_base *bases;
	struct host1x_syncpt *syncpt;
	unsigned int i;

	syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
			      GFP_KERNEL);
	if (!syncpt)
		return -ENOMEM;

	bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
			     GFP_KERNEL);
	if (!bases)
		return -ENOMEM;

	for (i = 0; i < host->info->nb_pts; i++) {
		syncpt[i].id = i;
		syncpt[i].host = host;
	}

	for (i = 0; i < host->info->nb_bases; i++)
		bases[i].id = i;

	mutex_init(&host->syncpt_mutex);
	host->syncpt = syncpt;
	host->bases = bases;

	/* Allocate sync point to use for clearing waits for expired fences */
	host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
	if (!host->nop_sp)
		return -ENOMEM;

	if (host->info->reserve_vblank_syncpts) {
		/*
		 * Reserve the VBLANK syncpoints (26 and 27) by taking an
		 * initial reference, so they cannot be allocated until the
		 * display driver releases the reservation.
		 */
		kref_init(&host->syncpt[26].ref);
		kref_init(&host->syncpt[27].ref);
	}

	return 0;
}

/**
 * host1x_syncpt_request() - request a syncpoint
 * @client: client requesting the syncpoint
 * @flags: flags
 *
 * host1x client drivers can use this function to allocate a syncpoint for
 * subsequent use. A syncpoint returned by this function will be reserved for
 * use by the client exclusively. When no longer using a syncpoint, a host1x
 * client driver needs to release it using host1x_syncpt_put().
 */
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags)
{
	struct host1x *host = dev_get_drvdata(client->host->parent);

	return host1x_syncpt_alloc(host, flags, dev_name(client->dev));
}
EXPORT_SYMBOL(host1x_syncpt_request);

static void syncpt_release(struct kref *ref)
{
	struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref);

	atomic_set(&sp->max_val, host1x_syncpt_read(sp));

	sp->locked = false;

	mutex_lock(&sp->host->syncpt_mutex);

	host1x_syncpt_base_free(sp->base);
	kfree(sp->name);
	sp->base = NULL;
	sp->name = NULL;
	sp->client_managed = false;

	mutex_unlock(&sp->host->syncpt_mutex);
}

/**
 * host1x_syncpt_put() - free a requested syncpoint
 * @sp: host1x syncpoint
 *
 * Release a syncpoint previously allocated using host1x_syncpt_request(). A
 * host1x client driver should call this when the syncpoint is no longer in
 * use.
 */
void host1x_syncpt_put(struct host1x_syncpt *sp)
{
	if (!sp)
		return;

	kref_put(&sp->ref, syncpt_release);
}
EXPORT_SYMBOL(host1x_syncpt_put);

void host1x_syncpt_deinit(struct host1x *host)
{
	struct host1x_syncpt *sp = host->syncpt;
	unsigned int i;

	for (i = 0; i < host->info->nb_pts; i++, sp++)
		kfree(sp->name);
}

/**
 * host1x_syncpt_read_max() - read maximum syncpoint value
 * @sp: host1x syncpoint
 *
 * The maximum syncpoint value indicates how many operations there are in
 * queue, either in channel or in a software thread.
 */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);
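
/*
 * Illustrative sketch of the request/put lifecycle described in the
 * host1x_syncpt_request() kernel-doc above. The error handling shown is
 * a minimal example, not taken from a real client driver.
 *
 *	struct host1x_syncpt *sp;
 *
 *	sp = host1x_syncpt_request(client, HOST1X_SYNCPT_CLIENT_MANAGED);
 *	if (!sp)
 *		return -ENOMEM;
 *
 *	// ... submit work, wait on thresholds ...
 *
 *	host1x_syncpt_put(sp);	// drops the reference; syncpt_release()
 *				// runs once the last reference is gone
 */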

/**
 * host1x_syncpt_read_min() - read minimum syncpoint value
 * @sp: host1x syncpoint
 *
 * The minimum syncpoint value is a shadow of the current sync point value in
 * hardware.
 */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);

/**
 * host1x_syncpt_read() - read the current syncpoint value
 * @sp: host1x syncpoint
 */
u32 host1x_syncpt_read(struct host1x_syncpt *sp)
{
	return host1x_syncpt_load(sp);
}
EXPORT_SYMBOL(host1x_syncpt_read);

unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}

unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}

unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}

/**
 * host1x_syncpt_get_by_id() - obtain a syncpoint by ID
 * @host: host1x controller
 * @id: syncpoint ID
 */
struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host,
					      unsigned int id)
{
	if (id >= host->info->nb_pts)
		return NULL;

	if (kref_get_unless_zero(&host->syncpt[id].ref))
		return &host->syncpt[id];
	else
		return NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_by_id);

/**
 * host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't
 *	increase the refcount.
 * @host: host1x controller
 * @id: syncpoint ID
 */
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host,
						    unsigned int id)
{
	if (id >= host->info->nb_pts)
		return NULL;

	return &host->syncpt[id];
}
EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref);

/**
 * host1x_syncpt_get() - increment syncpoint refcount
 * @sp: syncpoint
 */
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp)
{
	kref_get(&sp->ref);

	return sp;
}
EXPORT_SYMBOL(host1x_syncpt_get);

/**
 * host1x_syncpt_get_base() - obtain the wait base associated with a syncpoint
 * @sp: host1x syncpoint
 */
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
	return sp ? sp->base : NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_base);

/**
 * host1x_syncpt_base_id() - retrieve the ID of a syncpoint wait base
 * @base: host1x syncpoint wait base
 */
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
	return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);

/* No-op release callback: dropping a reservation must not tear down the syncpoint. */
static void do_nothing(struct kref *ref)
{
}

/**
 * host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint
 *	available for allocation
 * @client: host1x bus client
 * @syncpt_id: syncpoint ID to make available
 *
 * Makes VBLANK<i> syncpoint available for allocation if it was
 * reserved at initialization time. This should be called by the display
 * driver after it has ensured that any VBLANK increment programming configured
 * by the boot chain has been disabled.
 */
void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id)
{
	struct host1x *host = dev_get_drvdata(client->host->parent);

	if (!host->info->reserve_vblank_syncpts)
		return;

	kref_put(&host->syncpt[syncpt_id].ref, do_nothing);
}
EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);
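
/*
 * Illustrative sketch: host1x_syncpt_get_by_id() takes a reference via
 * kref_get_unless_zero(), so a successful lookup must be balanced with
 * host1x_syncpt_put(). The id value 5 here is arbitrary.
 *
 *	struct host1x_syncpt *sp = host1x_syncpt_get_by_id(host, 5);
 *
 *	if (sp) {
 *		u32 value = host1x_syncpt_read(sp);
 *
 *		// ... use value ...
 *
 *		host1x_syncpt_put(sp);
 *	}
 */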