Lines Matching defs:work (virt/kvm/async_pf.c)

45 static void async_pf_execute(struct work_struct *work)
48 container_of(work, struct kvm_async_pf, work);
63 * work item is fully processed.
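These hits are the top of async_pf_execute(): the workqueue hands the callback only the embedded work_struct, and container_of() walks back to the enclosing kvm_async_pf. A self-contained userspace sketch of that idiom follows; the struct layout and names are illustrative stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(): recover a pointer to
 * the structure that embeds a given member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {			/* minimal stand-in */
	void (*func)(struct work_struct *work);
};

struct async_pf_demo {			/* hypothetical, mirrors the embedding */
	unsigned long cr2_or_gpa;
	struct work_struct work;	/* only this member reaches the callback */
};

static void execute_demo(struct work_struct *work)
{
	struct async_pf_demo *apf =
		container_of(work, struct async_pf_demo, work);

	printf("recovered gpa: %#lx\n", apf->cr2_or_gpa);
}

int main(void)
{
	struct async_pf_demo apf = { .cr2_or_gpa = 0x1000 };

	apf.work.func = execute_demo;
	apf.work.func(&apf.work);	/* call back with just the member */
	return 0;
}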
99 static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
102 * The async #PF is "done", but KVM must wait for the work item itself,
105 * after the last call to module_put(). Note, flushing the work item
111 * need to be flushed (but sanity check that the work wasn't queued).
113 if (work->wakeup_all)
114 WARN_ON_ONCE(work->work.func);
116 flush_work(&work->work);
117 kmem_cache_free(async_pf_cache, work);
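The WARN_ON_ONCE() and flush_work() hits from kvm_flush_and_free_async_pf_work() are the two arms of an if/else on work->wakeup_all; the bare else between them contains no "work" and so is not listed. Roughly (a sketch, exact code is kernel-version dependent):

	if (work->wakeup_all)
		WARN_ON_ONCE(work->work.func);	/* wakeup-all items are never queued */
	else
		flush_work(&work->work);	/* wait for async_pf_execute() to return */

	kmem_cache_free(async_pf_cache, work);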
122 /* cancel outstanding work queue item */
124 struct kvm_async_pf *work =
126 typeof(*work), queue);
127 list_del(&work->queue);
130 flush_work(&work->work);
132 if (cancel_work_sync(&work->work))
133 kmem_cache_free(async_pf_cache, work);
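flush_work() and cancel_work_sync() above are not run back to back: in the file they are alternative branches of a CONFIG_KVM_ASYNC_PF_SYNC preprocessor block whose #ifdef/#else/#endif lines contain no "work". A sketch of the surrounding shape (version dependent):

#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
		if (cancel_work_sync(&work->work))
			kmem_cache_free(async_pf_cache, work);
#endif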
139 struct kvm_async_pf *work =
141 typeof(*work), link);
142 list_del(&work->link);
145 kvm_flush_and_free_async_pf_work(work);
155 struct kvm_async_pf *work;
160 work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
162 list_del(&work->link);
165 kvm_arch_async_page_ready(vcpu, work);
167 kvm_arch_async_page_present(vcpu, work);
169 list_del(&work->queue);
171 kvm_flush_and_free_async_pf_work(work);
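These hits are the dequeue loop in kvm_check_async_pf_completion(); the spin_lock()/spin_unlock() pair protecting the done-list manipulation contains no "work" and is therefore missing above. A rough sketch of one iteration, assuming the locking matches current mainline (which also guards the "page present" call with !IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC)):

		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		kvm_flush_and_free_async_pf_work(work);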
182 struct kvm_async_pf *work;
195 work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
196 if (!work)
199 work->wakeup_all = false;
200 work->vcpu = vcpu;
201 work->cr2_or_gpa = cr2_or_gpa;
202 work->addr = hva;
203 work->arch = *arch;
205 INIT_WORK(&work->work, async_pf_execute);
207 list_add_tail(&work->queue, &vcpu->async_pf.queue);
209 work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
211 schedule_work(&work->work);
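Taken together, the kvm_setup_async_pf() hits show the standard workqueue lifecycle: allocate a tracking struct, bind it to async_pf_execute() with INIT_WORK(), publish it on the vCPU's queue, then defer it with schedule_work(). The pattern in isolation, as a minimal kernel-module-style sketch (my_worker/my_work/kick/wait_for_it are made-up names, not KVM code):

#include <linux/workqueue.h>

static void my_worker(struct work_struct *work)
{
	/* Runs later in process context on the system workqueue; a real user
	 * would container_of(work, ...) back to its embedding object here. */
}

static struct work_struct my_work;

static void kick(void)
{
	INIT_WORK(&my_work, my_worker);	/* bind the callback to the item */
	schedule_work(&my_work);	/* queue it on system_wq */
}

static void wait_for_it(void)
{
	flush_work(&my_work);		/* block until my_worker() has returned */
}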
218 struct kvm_async_pf *work;
224 work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
225 if (!work)
228 work->wakeup_all = true;
229 INIT_LIST_HEAD(&work->queue); /* for list_del to work */
233 list_add_tail(&work->link, &vcpu->async_pf.done);
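The wakeup-all item is placed directly on the done list and never on the per-vCPU queue, so INIT_LIST_HEAD() self-links its queue node purely so that the later unconditional list_del(&work->queue) in kvm_check_async_pf_completion() is harmless. A self-contained userspace illustration of why that matters; the list helpers below are simplified stand-ins for the kernel's <linux/list.h>:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;	/* self-linked: "empty", but safe to unlink later */
	h->prev = h;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	/* Dereferences entry->prev/next, so they must point somewhere valid. */
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

int main(void)
{
	struct list_head queue, queued_item, never_queued;

	INIT_LIST_HEAD(&queue);
	INIT_LIST_HEAD(&queued_item);
	INIT_LIST_HEAD(&never_queued);	/* the wakeup_all case: initialized only */

	list_add_tail(&queued_item, &queue);
	list_del(&queued_item);		/* normal case: unlink a queued entry */
	list_del(&never_queued);	/* harmless thanks to INIT_LIST_HEAD() */

	printf("both list_del() calls were safe\n");
	return 0;
}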