/linux/scripts/

jobserver-exec
    15  jobs = b""   variable
    48  jobs += slot
    54  if len(jobs):
    55  os.write(writer, jobs)
    59  claim = len(jobs) + 1
    74  if len(jobs):
    75  os.write(writer, jobs)

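The jobserver-exec hits above show the core of the GNU make jobserver handshake: each byte read from the jobserver pipe is a claimed job slot collected in `jobs`, and every claimed byte is written back when the helper exits. Below is a minimal C sketch of that claim/release pattern, assuming the classic `--jobserver-auth=R,W` pipe-descriptor form of MAKEFLAGS (not the newer fifo: form); run_subtasks() is an illustrative stand-in, not part of the kernel script.

/*
 * Minimal sketch (not the kernel script itself) of the jobserver
 * claim/release handshake: every job slot beyond the one a process
 * implicitly owns is claimed by reading a byte from the jobserver pipe,
 * and each claimed byte must be written back when the work is done.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void run_subtasks(int slots)	/* illustrative stand-in */
{
	printf("running with %d job slot(s)\n", slots);
}

int main(void)
{
	const char *flags = getenv("MAKEFLAGS");
	const char *auth;
	char tokens[64];
	int rfd, wfd, claimed = 0;

	if (!flags || !(auth = strstr(flags, "--jobserver-auth=")) ||
	    sscanf(auth, "--jobserver-auth=%d,%d", &rfd, &wfd) != 2) {
		run_subtasks(1);		/* no jobserver: one implicit slot */
		return 0;
	}

	/* Claim whatever slots are available right now, without blocking. */
	fcntl(rfd, F_SETFL, O_NONBLOCK);
	while (claimed < (int)sizeof(tokens) &&
	       read(rfd, &tokens[claimed], 1) == 1)
		claimed++;

	run_subtasks(claimed + 1);		/* +1 for the implicit slot */

	if (claimed)				/* release every claimed token */
		write(wfd, tokens, claimed);
	return 0;
}
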
generate_initcall_order.pl
    18   my $jobs = {}; # child process pid -> file handle
    169  if (!exists($jobs->{$pid})) {
    173  my $fh = $jobs->{$pid};
    181  delete($jobs->{$pid});
    202  $jobs->{$pid} = $fh;
    213  if (scalar(keys(%{$jobs})) >= $njobs) {
    219  while (scalar(keys(%{$jobs})) > 0) {

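The generate_initcall_order.pl hits show a simple throttle: child pids are tracked in a hash, and no new child is forked while the count is at $njobs. A rough, self-contained C equivalent of that pattern could look like the sketch below; do_one_file() and the constants are illustrative only.

/*
 * Illustrative C analogue of the job-throttling pattern visible in
 * generate_initcall_order.pl: once the number of outstanding children
 * reaches the limit, one is reaped before the next is forked, and the
 * remainder are drained at the end.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static void do_one_file(int idx)	/* stand-in for the real per-file work */
{
	printf("child %d handling file #%d\n", getpid(), idx);
	_exit(0);
}

int main(void)
{
	const int njobs = 4, nfiles = 16;
	int running = 0;

	for (int i = 0; i < nfiles; i++) {
		if (running >= njobs) {		/* at the limit: reap one first */
			wait(NULL);
			running--;
		}
		if (fork() == 0)
			do_one_file(i);
		running++;
	}
	while (running-- > 0)			/* drain the remaining children */
		wait(NULL);
	return 0;
}
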
/linux/drivers/gpu/drm/panfrost/

panfrost_job.c
    159  struct panfrost_job *job = pfdev->jobs[slot][0];   in panfrost_dequeue_job()
    173  pfdev->jobs[slot][0] = pfdev->jobs[slot][1];   in panfrost_dequeue_job()
    174  pfdev->jobs[slot][1] = NULL;   in panfrost_dequeue_job()
    186  if (!pfdev->jobs[slot][0]) {   in panfrost_enqueue_job()
    187  pfdev->jobs[slot][0] = job;   in panfrost_enqueue_job()
    191  WARN_ON(pfdev->jobs[slot][1]);   in panfrost_enqueue_job()
    192  pfdev->jobs[slot][1] = job;   in panfrost_enqueue_job()
    194  panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));   in panfrost_enqueue_job()
    557  } else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) {   in panfrost_job_handle_irq()
    584  if (!failed[j] || !pfdev->jobs[j][0])   in panfrost_job_handle_irq()
    [all …]

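Taken together, the panfrost hits describe a two-deep queue per hardware job slot: jobs[slot][0] is the job currently on the hardware, jobs[slot][1] is the single job queued behind it, and dequeuing shifts the latter forward. A stripped-down userspace sketch of that structure, with struct job and NUM_JOB_SLOTS as stand-ins for the driver's real types, could read:

/*
 * Two-deep per-slot job queue, mirroring the pattern in the hits above:
 * index 0 is the running job, index 1 the one queued behind it.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_JOB_SLOTS 3

struct job { int id; };

static struct job *jobs[NUM_JOB_SLOTS][2];

static void enqueue_job(int slot, struct job *job)
{
	if (!jobs[slot][0]) {		/* slot idle: job goes straight in */
		jobs[slot][0] = job;
		return;
	}
	assert(!jobs[slot][1]);		/* only one job may wait behind it */
	jobs[slot][1] = job;
}

static struct job *dequeue_job(int slot)
{
	struct job *job = jobs[slot][0];

	jobs[slot][0] = jobs[slot][1];	/* promote the queued job, if any */
	jobs[slot][1] = NULL;
	return job;
}

int main(void)
{
	struct job a = { 1 }, b = { 2 };

	enqueue_job(0, &a);
	enqueue_job(0, &b);
	printf("%d %d\n", dequeue_job(0)->id, dequeue_job(0)->id);	/* 1 2 */
	return 0;
}
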
TODO
    11  - Compute job support. So called 'compute only' jobs need to be plumbed up to

panfrost_device.h
    129  struct panfrost_job *jobs[NUM_JOB_SLOTS][2];   member

/linux/drivers/gpu/drm/panthor/

panthor_drv.c
    302  struct panthor_job_ctx *jobs;   member
    457  ctx->jobs[idx].job = job;   in panthor_submit_ctx_add_job()
    459  ret = PANTHOR_UOBJ_GET_ARRAY(ctx->jobs[idx].syncops, syncs);   in panthor_submit_ctx_add_job()
    463  ctx->jobs[idx].syncop_count = syncs->count;   in panthor_submit_ctx_add_job()
    502  struct dma_fence *done_fence = &ctx->jobs[job_idx].job->s_fence->finished;   in panthor_submit_ctx_update_job_sync_signal_fences()
    503  const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;   in panthor_submit_ctx_update_job_sync_signal_fences()
    504  u32 sync_op_count = ctx->jobs[job_idx].syncop_count;   in panthor_submit_ctx_update_job_sync_signal_fences()
    541  const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;   in panthor_submit_ctx_collect_job_signal_ops()
    542  u32 sync_op_count = ctx->jobs[job_idx].syncop_count;   in panthor_submit_ctx_collect_job_signal_ops()
    603  const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;   in panthor_submit_ctx_add_sync_deps_to_job()
    [all …]

/linux/drivers/md/

dm-kcopyd.c
    417  static struct kcopyd_job *pop_io_job(struct list_head *jobs,   in pop_io_job() argument
    426  list_for_each_entry(job, jobs, list) {   in pop_io_job()
    443  static struct kcopyd_job *pop(struct list_head *jobs,   in pop() argument
    450  if (!list_empty(jobs)) {   in pop()
    451  if (jobs == &kc->io_jobs)   in pop()
    452  job = pop_io_job(jobs, kc);   in pop()
    454  job = list_entry(jobs->next, struct kcopyd_job, list);   in pop()
    463  static void push(struct list_head *jobs, struct kcopyd_job *job)   in push() argument
    469  list_add_tail(&job->list, jobs);   in push()
    474  static void push_head(struct list_head *jobs, struct kcopyd_job *job)   in push_head() argument
    [all …]

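The dm-kcopyd.c hits revolve around three list helpers: push() appends a job at the tail, push_head() requeues one at the head so it is picked up next, and pop() takes from the head. The kernel code works on struct list_head under a per-client lock; the sketch below reproduces only the queue discipline, using a plain sentinel-based doubly linked list with illustrative names.

/*
 * Userspace sketch of the kcopyd-style job-list discipline:
 * push() appends at the tail, push_head() requeues at the head,
 * pop() removes from the head (or returns NULL when empty).
 */
#include <stdio.h>
#include <stdlib.h>

struct kjob {
	struct kjob *prev, *next;
	int id;
};

static struct kjob jobs = { &jobs, &jobs, 0 };	/* empty sentinel list */

static void push(struct kjob *job)		/* append at the tail */
{
	job->prev = jobs.prev;
	job->next = &jobs;
	jobs.prev->next = job;
	jobs.prev = job;
}

static void push_head(struct kjob *job)		/* requeue at the head */
{
	job->next = jobs.next;
	job->prev = &jobs;
	jobs.next->prev = job;
	jobs.next = job;
}

static struct kjob *pop(void)			/* take the head, or NULL */
{
	struct kjob *job = jobs.next;

	if (job == &jobs)
		return NULL;
	job->prev->next = job->next;
	job->next->prev = job->prev;
	return job;
}

int main(void)
{
	struct kjob a = { .id = 1 }, b = { .id = 2 };

	push(&a);
	push(&b);
	push_head(pop());			/* put the head back: order unchanged */
	for (struct kjob *j; (j = pop()); )
		printf("job %d\n", j->id);	/* prints 1 then 2 */
	return 0;
}
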
/linux/Documentation/core-api/

padata.rst
    9    Padata is a mechanism by which the kernel can farm jobs out to be done in
    16   Padata also supports multithreaded jobs, splitting up the job evenly while load
    25   The first step in using padata to run serialized jobs is to set up a
    26   padata_instance structure for overall control of how jobs are to be run::
    39   jobs to be serialized independently. A padata_instance may have one or more
    40   padata_shells associated with it, each allowing a separate series of jobs.
    45   The CPUs used to run jobs can be changed in two ways, programmatically with
    52   parallel cpumask describes which processors will be used to execute jobs
    116  true parallelism is achieved by submitting multiple jobs. parallel() runs with
    141  pains to ensure that jobs are completed in the order in which they were
    [all …]

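The padata.rst fragments outline the setup order: allocate a padata_instance, attach one or more padata_shells, then submit jobs whose parallel() callbacks eventually call padata_do_serial() so results are serialized in submission order. The sketch below follows the prototypes documented in padata.rst; it is kernel-side code, exact signatures have shifted between kernel versions, my_job/my_parallel()/my_serial() are illustrative names, and error handling plus the padata_free()/padata_free_shell() teardown are omitted.

/*
 * Kernel-side sketch of the padata sequence outlined above, following
 * the prototypes documented in Documentation/core-api/padata.rst.
 * Illustrative only; not a complete or version-exact example.
 */
#include <linux/errno.h>
#include <linux/padata.h>

struct my_job {
	struct padata_priv padata;	/* embedded control structure */
	/* job-specific data would follow */
};

static void my_parallel(struct padata_priv *padata)
{
	/* the parallel part of the work runs here; completion is
	 * signalled back to padata for serialization with: */
	padata_do_serial(padata);
}

static void my_serial(struct padata_priv *padata)
{
	/* runs strictly in submission order once earlier jobs finish */
}

static int submit_one(struct padata_shell *ps, struct my_job *job)
{
	int cb_cpu = 0;		/* serialization callback CPU, arbitrary here */

	job->padata.parallel = my_parallel;
	job->padata.serial = my_serial;
	return padata_do_parallel(ps, &job->padata, &cb_cpu);
}

static int my_setup(void)
{
	struct padata_instance *pinst = padata_alloc("my_jobs");
	struct padata_shell *ps = pinst ? padata_alloc_shell(pinst) : NULL;

	return ps ? 0 : -ENOMEM;	/* teardown omitted in this sketch */
}
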
/linux/drivers/gpu/drm/amd/amdgpu/

amdgpu_cs.c
    296   num_ibs[i], &p->jobs[i]);   in amdgpu_cs_pass1()
    299   p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];   in amdgpu_cs_pass1()
    301   p->gang_leader = p->jobs[p->gang_leader_idx];   in amdgpu_cs_pass1()
    348   job = p->jobs[r];   in amdgpu_cs_p2_ib()
    583   p->jobs[i]->shadow_va = shadow->shadow_va;   in amdgpu_cs_p2_shadow()
    584   p->jobs[i]->csa_va = shadow->csa_va;   in amdgpu_cs_p2_shadow()
    585   p->jobs[i]->gds_va = shadow->gds_va;   in amdgpu_cs_p2_shadow()
    586   p->jobs[i]->init_shadow =   in amdgpu_cs_p2_shadow()
    985   amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,   in amdgpu_cs_parser_bos()
    1013  struct amdgpu_job *job = p->jobs[i];   in trace_amdgpu_cs_ibs()
    [all …]

amdgpu_cs.h
    62  struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];   member

/linux/Documentation/gpu/

drm-compute.rst
    10  Some hardware may schedule compute jobs, and have no way to pre-empt them, or
    16  As with normal compute jobs, dma-fence may not be used at all. In this case,
    34  older compute jobs to start a new one.
    43  into cgroups, that will allow jobs to run next to each other without

/linux/Documentation/admin-guide/device-mapper/

kcopyd.rst
    10  to set aside for their copy jobs. This is done with a call to
    43  When a user is done with all their copy jobs, they should call

unstriped.rst
    105  has read and write jobs that are independent of each other. Compared to

/linux/Documentation/dev-tools/kunit/

run_wrapper.rst
    33   ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all`
    36   - ``--jobs`` sets the number of threads to build the kernel.
    231  --jobs=12 \
    304  - ``--jobs``: Specifies the number of jobs (commands) to run simultaneously.

/linux/tools/perf/scripts/python/

parallel-perf.py
    724  if self.jobs < 0 or self.nr < 0 or self.interval < 0:
    728  if self.jobs == 0:
    729  self.jobs = NumberOfCPUs()
    734  self.nr = self.jobs
    835  result = RunWork(self.worklist, self.jobs, verbosity=self.verbosity)

/linux/tools/testing/selftests/net/

udpgro_bench.sh
    16  [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null

udpgro_frglist.sh
    16  [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null

udpgro.sh
    22  [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null

/linux/Documentation/admin-guide/cgroup-v1/

memcg_test.rst
    223  run jobs under child_a and child_b
    225  create/delete following groups at random while jobs are running::
    231  running new jobs in new group is also good.

/linux/Documentation/translations/zh_CN/mm/

hwpoison.rst
    127  echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks

/linux/Documentation/admin-guide/mm/

multigen_lru.rst
    103  scheduler needs to estimate the working sets of the existing jobs.
    162  existing jobs.

/linux/Documentation/driver-api/tty/

index.rst
    25  implementing echoes, signal handling, jobs control, special characters

/linux/Documentation/accounting/

psi.rst
    27  dynamically using techniques such as load shedding, migrating jobs to
    29  priority or restartable batch jobs.

/linux/tools/cgroup/

iocost_coef_gen.py
    89  def run_fio(testfile, duration, iotype, iodepth, blocksize, jobs):   argument

/linux/include/uapi/drm/

pvr_drm.h
    1288  struct drm_pvr_obj_array jobs;   member