// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */

/**
 * DOC: Broadcom V3D scheduling
 *
 * The shared DRM GPU scheduler is used to coordinate submitting jobs
 * to the hardware. Each DRM fd (roughly a client process) gets its
 * own scheduler entity, which will process jobs in order. The GPU
 * scheduler will schedule the clients with a FIFO scheduling algorithm.
 *
 * For simplicity, and in order to keep latency low for interactive
 * jobs when bulk background jobs are queued up, we submit a new job
 * to the HW only when it has completed the last one, instead of
 * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
 * drm_sched_job_add_dependency() to manage the dependency between bin
 * and render, instead of having the clients submit jobs using the HW's
 * semaphores to interlock between them.
 */

#include <linux/sched/clock.h>
#include <linux/kthread.h>

#include <drm/drm_syncobj.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16

static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_job, base);
}

static struct v3d_bin_job *
to_bin_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_bin_job, base.base);
}

static struct v3d_render_job *
to_render_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_render_job, base.base);
}

static struct v3d_tfu_job *
to_tfu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_tfu_job, base.base);
}

static struct v3d_csd_job *
to_csd_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_csd_job, base.base);
}

static struct v3d_cpu_job *
to_cpu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_cpu_job, base.base);
}

static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	v3d_job_cleanup(job);
}

void
v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
			      unsigned int count)
{
	if (query_info->queries) {
		unsigned int i;

		for (i = 0; i < count; i++)
			drm_syncobj_put(query_info->queries[i].syncobj);

		kvfree(query_info->queries);
	}
}

void
v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
				unsigned int count)
{
	if (query_info->queries) {
		unsigned int i;

		for (i = 0; i < count; i++) {
			drm_syncobj_put(query_info->queries[i].syncobj);
			kvfree(query_info->queries[i].kperfmon_ids);
		}

		kvfree(query_info->queries);
	}
}

static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);

	v3d_timestamp_query_info_free(&job->timestamp_query,
				      job->timestamp_query.count);

	v3d_performance_query_info_free(&job->performance_query,
					job->performance_query.count);

	v3d_job_cleanup(&job->base);
}

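/* Make sure the perfmon the hardware is collecting counters for matches the
 * one this job wants: the global perfmon, if one is set, takes precedence
 * over the job's own. The previously active perfmon, if any, is stopped
 * first.
 */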
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
	struct v3d_perfmon *perfmon = v3d->global_perfmon;

	if (!perfmon)
		perfmon = job->perfmon;

	if (perfmon == v3d->active_perfmon)
		return;

	if (perfmon != v3d->active_perfmon)
		v3d_perfmon_stop(v3d, v3d->active_perfmon, true);

	if (perfmon && v3d->active_perfmon != perfmon)
		v3d_perfmon_start(v3d, perfmon);
}

static void
v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
{
	struct v3d_dev *v3d = job->v3d;
	struct v3d_file_priv *file = job->file->driver_priv;
	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
	struct v3d_stats *local_stats = &file->stats[queue];
	u64 now = local_clock();
	unsigned long flags;

	/*
	 * We only need to disable local interrupts to appease lockdep who
	 * otherwise would think v3d_job_start_stats vs v3d_stats_update has an
	 * unsafe in-irq vs no-irq-off usage problem. This is a false positive
	 * because all the locks are per queue and stats type, and all jobs are
	 * completely one at a time serialised. More specifically:
	 *
	 * 1. Locks for GPU queues are updated from interrupt handlers under a
	 *    spin lock and started here with preemption disabled.
	 *
	 * 2. Locks for CPU queues are updated from the worker with preemption
	 *    disabled and equally started here with preemption disabled.
	 *
	 * Therefore both are consistent.
	 *
	 * 3. Because the next job can only be queued after the previous one
	 *    has been signaled, and locks are per queue, there is also no
	 *    scope for the start part to race with the update part.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_save(flags);
	else
		preempt_disable();

	write_seqcount_begin(&local_stats->lock);
	local_stats->start_ns = now;
	write_seqcount_end(&local_stats->lock);

	write_seqcount_begin(&global_stats->lock);
	global_stats->start_ns = now;
	write_seqcount_end(&global_stats->lock);

	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_restore(flags);
	else
		preempt_enable();
}

static void
v3d_stats_update(struct v3d_stats *stats, u64 now)
{
	write_seqcount_begin(&stats->lock);
	stats->enabled_ns += now - stats->start_ns;
	stats->jobs_completed++;
	stats->start_ns = 0;
	write_seqcount_end(&stats->lock);
}

void
v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
{
	struct v3d_dev *v3d = job->v3d;
	struct v3d_file_priv *file = job->file->driver_priv;
	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
	struct v3d_stats *local_stats = &file->stats[queue];
	u64 now = local_clock();
	unsigned long flags;

	/* See comment in v3d_job_start_stats() */
	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_save(flags);
	else
		preempt_disable();

	v3d_stats_update(local_stats, now);
	v3d_stats_update(global_stats, now);

	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_restore(flags);
	else
		preempt_enable();
}

static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	unsigned long irqflags;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		spin_lock_irqsave(&v3d->job_lock, irqflags);
		v3d->bin_job = NULL;
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		return NULL;
	}

	/* Lock required around bin_job update vs
	 * v3d_overflow_mem_work().
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	v3d->bin_job = job;
	/* Clear out the overflow allocation, so we don't
	 * reuse the overflow attached to a previous job.
	 */
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_BIN);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base, V3D_BIN);
	v3d_switch_perfmon(v3d, &job->base);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	if (job->qma) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
	}
	if (job->qts) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
			       V3D_CLE_CT0QTS_ENABLE |
			       job->qts);
	}
	V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);

	return fence;
}

static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		v3d->render_job = NULL;
		return NULL;
	}

	v3d->render_job = job;

	/* Can we avoid this flush? We need to be careful of
	 * scheduling, though -- imagine job0 rendering to texture and
	 * job1 reading, and them being executed as bin0, bin1,
	 * render0, render1, so that render1's flush at bin time
	 * wasn't enough.
	 */
	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_RENDER);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base, V3D_RENDER);
	v3d_switch_perfmon(v3d, &job->base);

	/* XXX: Set the QCFG */

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);

	return fence;
}

static struct dma_fence *
v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_tfu_job *job = to_tfu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		v3d->tfu_job = NULL;
		return NULL;
	}

	v3d->tfu_job = job;

	fence = v3d_fence_create(v3d, V3D_TFU);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base, V3D_TFU);

	V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia);
	V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis);
	V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
	V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
	V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
	if (v3d->ver >= 71)
		V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
	V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
	V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
	if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
		V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
		V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
		V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
	}
	/* ICFG kicks off the job. */
	V3D_WRITE(V3D_TFU_ICFG(v3d->ver), job->args.icfg | V3D_TFU_ICFG_IOC);

	return fence;
}

static struct dma_fence *
v3d_csd_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	int i, csd_cfg0_reg;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		v3d->csd_job = NULL;
		return NULL;
	}

	v3d->csd_job = job;

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_CSD);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base, V3D_CSD);
	v3d_switch_perfmon(v3d, &job->base);

	csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
	for (i = 1; i <= 6; i++)
		V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);

	/* Although V3D 7.1 has an eighth configuration register, we are not
	 * using it. Therefore, make sure it remains unused.
	 *
	 * XXX: Set the CFG7 register
	 */
	if (v3d->ver >= 71)
		V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);

	/* CFG0 write kicks off the job. */
	V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]);

	return fence;
}

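/* CPU job implementing indirect compute dispatch: read the workgroup counts
 * stored in the indirect buffer and rewrite the queued CSD job's
 * configuration (and the shader uniforms that consume those counts) before
 * that CSD job is submitted.
 */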
static void
v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
{
	struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
	struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
	u32 *wg_counts;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(indirect);

	wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);

	if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0)
		return;

	args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
		       (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;

	for (int i = 0; i < 3; i++) {
		/* 0xffffffff indicates that the uniform rewrite is not needed */
		if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) {
			u32 uniform_idx = indirect_csd->wg_uniform_offsets[i];
			((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
		}
	}

	v3d_put_bo_vaddr(indirect);
	v3d_put_bo_vaddr(bo);
}

static void
v3d_timestamp_query(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
		*((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull;

		drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj,
					  job->base.done_fence);
	}

	v3d_put_bo_vaddr(bo);
}

static void
v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
		*((u64 *)value_addr) = 0;

		drm_syncobj_replace_fence(queries[i].syncobj, NULL);
	}

	v3d_put_bo_vaddr(bo);
}

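/* Helpers for writing query results back to the userspace-visible buffer in
 * either the 32-bit or the 64-bit layout requested by the copy job.
 */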
static void write_to_buffer_32(u32 *dst, unsigned int idx, u32 value)
{
	dst[idx] = value;
}

static void write_to_buffer_64(u64 *dst, unsigned int idx, u64 value)
{
	dst[idx] = value;
}

static void
write_to_buffer(void *dst, unsigned int idx, bool do_64bit, u64 value)
{
	if (do_64bit)
		write_to_buffer_64(dst, idx, value);
	else
		write_to_buffer_32(dst, idx, value);
}

static void
v3d_copy_query_results(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]);
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct dma_fence *fence;
	u8 *query_addr;
	bool available, write_result;
	u8 *data;
	int i;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(timestamp);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (i = 0; i < timestamp_query->count; i++) {
		fence = drm_syncobj_fence_get(queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result) {
			query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
			write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr));
		}

		if (copy->availability_bit)
			write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(timestamp);
	v3d_put_bo_vaddr(bo);
}

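/* Stop the perfmons referenced by each performance query, clear their
 * accumulated counter values, and drop any fence attached to the query's
 * syncobj.
 */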
static void
v3d_reset_performance_queries(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
	struct v3d_dev *v3d = job->base.v3d;
	struct v3d_perfmon *perfmon;

	for (int i = 0; i < performance_query->count; i++) {
		for (int j = 0; j < performance_query->nperfmons; j++) {
			perfmon = v3d_perfmon_find(v3d_priv,
						   performance_query->queries[i].kperfmon_ids[j]);
			if (!perfmon) {
				DRM_DEBUG("Failed to find perfmon.");
				continue;
			}

			v3d_perfmon_stop(v3d, perfmon, false);

			memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64));

			v3d_perfmon_put(perfmon);
		}

		drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL);
	}
}

static void
v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data,
				   unsigned int query)
{
	struct v3d_performance_query_info *performance_query =
		&job->performance_query;
	struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
	struct v3d_performance_query *perf_query =
		&performance_query->queries[query];
	struct v3d_dev *v3d = job->base.v3d;
	unsigned int i, j, offset;

	for (i = 0, offset = 0;
	     i < performance_query->nperfmons;
	     i++, offset += DRM_V3D_MAX_PERF_COUNTERS) {
		struct v3d_perfmon *perfmon;

		perfmon = v3d_perfmon_find(v3d_priv,
					   perf_query->kperfmon_ids[i]);
		if (!perfmon) {
			DRM_DEBUG("Failed to find perfmon.");
			continue;
		}

		v3d_perfmon_stop(v3d, perfmon, true);

		if (job->copy.do_64bit) {
			for (j = 0; j < perfmon->ncounters; j++)
				write_to_buffer_64(data, offset + j,
						   perfmon->values[j]);
		} else {
			for (j = 0; j < perfmon->ncounters; j++)
				write_to_buffer_32(data, offset + j,
						   perfmon->values[j]);
		}

		v3d_perfmon_put(perfmon);
	}
}

static void
v3d_copy_performance_query(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct dma_fence *fence;
	bool available, write_result;
	u8 *data;

	v3d_get_bo_vaddr(bo);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (int i = 0; i < performance_query->count; i++) {
		fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result)
			v3d_write_performance_query_result(job, data, i);

		if (copy->availability_bit)
			write_to_buffer(data, performance_query->ncounters,
					copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(bo);
}

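/* Per-type handlers for CPU jobs, indexed by job->job_type. */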
static const v3d_cpu_job_fn cpu_job_function[] = {
	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect,
	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query,
	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries,
	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results,
	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries,
	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query,
};

static struct dma_fence *
v3d_cpu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;

	if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
		DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
		return NULL;
	}

	v3d_job_start_stats(&job->base, V3D_CPU);
	trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);

	cpu_job_function[job->job_type](job);

	trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
	v3d_job_update_stats(&job->base, V3D_CPU);

	return NULL;
}

static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;

	v3d_job_start_stats(job, V3D_CACHE_CLEAN);

	v3d_clean_caches(v3d);

	v3d_job_update_stats(job, V3D_CACHE_CLEAN);

	return NULL;
}

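/* Common timeout handling: stop every queue's scheduler, reset the GPU, then
 * resubmit the pending jobs and restart the schedulers.
 */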
static enum drm_gpu_sched_stat
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
{
	enum v3d_queue q;

	mutex_lock(&v3d->reset_lock);

	/* block scheduler */
	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_stop(&v3d->queue[q].sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	v3d_reset(v3d);

	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_resubmit_jobs(&v3d->queue[q].sched);

	/* Unblock schedulers and restart their jobs. */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		drm_sched_start(&v3d->queue[q].sched, 0);
	}

	mutex_unlock(&v3d->reset_lock);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

/* If the current address or return address has changed, then the GPU
 * has probably made progress and we should delay the reset. This
 * could fail if the GPU got in an infinite loop in the CL, but that
 * is pretty unlikely outside of an i-g-t testcase.
 */
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
		    u32 *timedout_ctca, u32 *timedout_ctra)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;
	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));

	if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
		*timedout_ctca = ctca;
		*timedout_ctra = ctra;
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job);
}

static enum drm_gpu_sched_stat
v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_BIN,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_render_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_RENDER,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_generic_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
}

static enum drm_gpu_sched_stat
v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));

	/* If we've made progress, skip reset and let the timer get
	 * rearmed.
	 */
	if (job->timedout_batches != batches) {
		job->timedout_batches = batches;
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job);
}

static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
	.run_job = v3d_bin_job_run,
	.timedout_job = v3d_bin_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_render_sched_ops = {
	.run_job = v3d_render_job_run,
	.timedout_job = v3d_render_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
	.run_job = v3d_tfu_job_run,
	.timedout_job = v3d_generic_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
	.run_job = v3d_csd_job_run,
	.timedout_job = v3d_csd_job_timedout,
	.free_job = v3d_sched_job_free
};

static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
	.run_job = v3d_cache_clean_job_run,
	.timedout_job = v3d_generic_job_timedout,
	.free_job = v3d_sched_job_free
};

static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
	.run_job = v3d_cpu_job_run,
	.timedout_job = v3d_generic_job_timedout,
	.free_job = v3d_cpu_job_free
};

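/* Create the scheduler for one queue. The credit limit of 1 matches the
 * submission model described in the DOC comment at the top of this file:
 * only one job per queue is handed to the hardware at a time.
 */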
static int
v3d_queue_sched_init(struct v3d_dev *v3d, const struct drm_sched_backend_ops *ops,
		     enum v3d_queue queue, const char *name)
{
	struct drm_sched_init_args args = {
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = 1,
		.timeout = msecs_to_jiffies(500),
		.dev = v3d->drm.dev,
	};

	args.ops = ops;
	args.name = name;

	return drm_sched_init(&v3d->queue[queue].sched, &args);
}

int
v3d_sched_init(struct v3d_dev *v3d)
{
	int ret;

	ret = v3d_queue_sched_init(v3d, &v3d_bin_sched_ops, V3D_BIN, "v3d_bin");
	if (ret)
		return ret;

	ret = v3d_queue_sched_init(v3d, &v3d_render_sched_ops, V3D_RENDER,
				   "v3d_render");
	if (ret)
		goto fail;

	ret = v3d_queue_sched_init(v3d, &v3d_tfu_sched_ops, V3D_TFU, "v3d_tfu");
	if (ret)
		goto fail;

	if (v3d_has_csd(v3d)) {
		ret = v3d_queue_sched_init(v3d, &v3d_csd_sched_ops, V3D_CSD,
					   "v3d_csd");
		if (ret)
			goto fail;

		ret = v3d_queue_sched_init(v3d, &v3d_cache_clean_sched_ops,
					   V3D_CACHE_CLEAN, "v3d_cache_clean");
		if (ret)
			goto fail;
	}

	ret = v3d_queue_sched_init(v3d, &v3d_cpu_sched_ops, V3D_CPU, "v3d_cpu");
	if (ret)
		goto fail;

	return 0;

fail:
	v3d_sched_fini(v3d);
	return ret;
}

void
v3d_sched_fini(struct v3d_dev *v3d)
{
	enum v3d_queue q;

	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		if (v3d->queue[q].sched.ready)
			drm_sched_fini(&v3d->queue[q].sched);
	}
}