Lines Matching +full:max +full:- +full:cur in drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c (vmwgfx buffer-object dirty tracking)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
13 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
14 * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
34 * struct vmw_bo_dirty - Dirty information for buffer objects
57 return vbo->dirty && (vbo->dirty->start < vbo->dirty->end); in vmw_bo_is_dirty()
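
The helper above reports a buffer as dirty only while dirty->start < dirty->end, and vmw_bo_dirty_add() further down initializes the span to start = bitmap_size, end = 0 so it starts out empty. A minimal userspace sketch of that convention; struct dirty_span, span_reset() and span_record() are hypothetical names, not driver API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the start/end fields of struct vmw_bo_dirty. */
struct dirty_span {
        size_t start;           /* first possibly-dirty page */
        size_t end;             /* one past the last possibly-dirty page */
        size_t num_pages;
};

/* Empty span: start past every page, end at zero, so start < end is false. */
static void span_reset(struct dirty_span *s)
{
        s->start = s->num_pages;
        s->end = 0;
}

static bool span_is_dirty(const struct dirty_span *s)
{
        return s->start < s->end;       /* mirrors vmw_bo_is_dirty() */
}

/* Widen the span to cover one newly dirtied page. */
static void span_record(struct dirty_span *s, size_t page)
{
        if (page < s->start)
                s->start = page;
        if (page + 1 > s->end)
                s->end = page + 1;
}

int main(void)
{
        struct dirty_span s = { .num_pages = 16 };

        span_reset(&s);
        printf("dirty: %d\n", span_is_dirty(&s));       /* 0 */
        span_record(&s, 5);
        span_record(&s, 9);
        printf("dirty: %d span: [%zu, %zu)\n", span_is_dirty(&s), s.start, s.end);
        return 0;
}
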
61 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
66 * dirty-tracking method.
70 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan_pagetable()
71 pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node); in vmw_bo_dirty_scan_pagetable()
72 struct address_space *mapping = vbo->tbo.bdev->dev_mapping; in vmw_bo_dirty_scan_pagetable()
77 offset, dirty->bitmap_size, in vmw_bo_dirty_scan_pagetable()
78 offset, &dirty->bitmap[0], in vmw_bo_dirty_scan_pagetable()
79 &dirty->start, &dirty->end); in vmw_bo_dirty_scan_pagetable()
81 dirty->change_count++; in vmw_bo_dirty_scan_pagetable()
83 dirty->change_count = 0; in vmw_bo_dirty_scan_pagetable()
85 if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { in vmw_bo_dirty_scan_pagetable()
86 dirty->change_count = 0; in vmw_bo_dirty_scan_pagetable()
87 dirty->method = VMW_BO_DIRTY_MKWRITE; in vmw_bo_dirty_scan_pagetable()
89 offset, dirty->bitmap_size); in vmw_bo_dirty_scan_pagetable()
91 offset, dirty->bitmap_size, in vmw_bo_dirty_scan_pagetable()
92 offset, &dirty->bitmap[0], in vmw_bo_dirty_scan_pagetable()
93 &dirty->start, &dirty->end); in vmw_bo_dirty_scan_pagetable()
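
The scan above counts consecutive pagetable scans that found dirty bits and, once the count exceeds VMW_DIRTY_NUM_CHANGE_TRIGGERS, switches the buffer to the mkwrite method. A compact sketch of that hysteresis; the threshold value and all names here are invented, since the real constant is not shown in this listing:

#include <stdio.h>

#define NUM_CHANGE_TRIGGERS 4           /* invented value for illustration */

enum dirty_method { DIRTY_PAGETABLE, DIRTY_MKWRITE };

struct tracker {
        enum dirty_method method;
        unsigned int change_count;
};

/* Called after each pagetable scan with the number of dirty pages found. */
static void pagetable_scan_done(struct tracker *t, unsigned long num_dirty)
{
        if (num_dirty)
                t->change_count++;
        else
                t->change_count = 0;

        if (t->change_count > NUM_CHANGE_TRIGGERS) {
                /* Written on every scan: fall back to write-notify faults. */
                t->change_count = 0;
                t->method = DIRTY_MKWRITE;
        }
}

int main(void)
{
        struct tracker t = { .method = DIRTY_PAGETABLE };

        for (int i = 0; i < 6; i++)
                pagetable_scan_done(&t, 1);
        printf("method: %s\n", t.method == DIRTY_MKWRITE ? "mkwrite" : "pagetable");
        return 0;
}
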
98 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
101 * Write-protect pages written to so that consecutive write accesses will
104 * This function may change the dirty-tracking method.
108 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan_mkwrite()
109 unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node); in vmw_bo_dirty_scan_mkwrite()
110 struct address_space *mapping = vbo->tbo.bdev->dev_mapping; in vmw_bo_dirty_scan_mkwrite()
113 if (dirty->end <= dirty->start) in vmw_bo_dirty_scan_mkwrite()
116 num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping, in vmw_bo_dirty_scan_mkwrite()
117 dirty->start + offset, in vmw_bo_dirty_scan_mkwrite()
118 dirty->end - dirty->start); in vmw_bo_dirty_scan_mkwrite()
120 if (100UL * num_marked / dirty->bitmap_size > in vmw_bo_dirty_scan_mkwrite()
122 dirty->change_count++; in vmw_bo_dirty_scan_mkwrite()
124 dirty->change_count = 0; in vmw_bo_dirty_scan_mkwrite()
126 if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { in vmw_bo_dirty_scan_mkwrite()
128 pgoff_t end = dirty->bitmap_size; in vmw_bo_dirty_scan_mkwrite()
130 dirty->method = VMW_BO_DIRTY_PAGETABLE; in vmw_bo_dirty_scan_mkwrite()
132 &dirty->bitmap[0], in vmw_bo_dirty_scan_mkwrite()
134 bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size); in vmw_bo_dirty_scan_mkwrite()
135 if (dirty->start < dirty->end) in vmw_bo_dirty_scan_mkwrite()
136 bitmap_set(&dirty->bitmap[0], dirty->start, in vmw_bo_dirty_scan_mkwrite()
137 dirty->end - dirty->start); in vmw_bo_dirty_scan_mkwrite()
138 dirty->change_count = 0; in vmw_bo_dirty_scan_mkwrite()
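
Going the other way, the mkwrite scan above counts scans in which more than a given percentage of the pages took write faults and then promotes the buffer back to pagetable scanning, re-seeding the bitmap from the coarse start/end span so no dirty page is lost. A sketch of that re-seed step, using a byte-per-page array in place of the kernel bitmap helpers; all names are hypothetical:

#include <stddef.h>
#include <string.h>

struct coarse {
        size_t start, end, num_pages;
};

/*
 * When switching from mkwrite back to pagetable scanning, per-page state is
 * unknown, so mark everything inside the coarse span dirty. 'dirty' is a
 * byte-per-page stand-in for the kernel bitmap.
 */
static void reseed_from_span(unsigned char *dirty, const struct coarse *c)
{
        memset(dirty, 0, c->num_pages);                         /* bitmap_clear() */
        if (c->start < c->end)
                memset(dirty + c->start, 1, c->end - c->start); /* bitmap_set() */
}

int main(void)
{
        unsigned char dirty[16] = {0};
        struct coarse c = { .start = 3, .end = 9, .num_pages = 16 };

        reseed_from_span(dirty, &c);
        return 0;
}
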
143 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
151 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan()
153 if (dirty->method == VMW_BO_DIRTY_PAGETABLE) in vmw_bo_dirty_scan()
160 * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
173 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_pre_unmap()
174 unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node); in vmw_bo_dirty_pre_unmap()
175 struct address_space *mapping = vbo->tbo.bdev->dev_mapping; in vmw_bo_dirty_pre_unmap()
177 if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end) in vmw_bo_dirty_pre_unmap()
180 wp_shared_mapping_range(mapping, start + offset, end - start); in vmw_bo_dirty_pre_unmap()
182 end - start, offset, in vmw_bo_dirty_pre_unmap()
183 &dirty->bitmap[0], &dirty->start, in vmw_bo_dirty_pre_unmap()
184 &dirty->end); in vmw_bo_dirty_pre_unmap()
188 * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
198 unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node); in vmw_bo_dirty_unmap()
199 struct address_space *mapping = vbo->tbo.bdev->dev_mapping; in vmw_bo_dirty_unmap()
203 (loff_t) (end - start) << PAGE_SHIFT); in vmw_bo_dirty_unmap()
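
unmap_mapping_range() takes a byte range, so the page-granular values are shifted by PAGE_SHIFT; only the length argument appears in this listing, and the offset argument is presumably built the same way from the VMA node start. A worked example of the arithmetic, assuming 4 KiB pages and made-up offsets:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumes 4 KiB pages */

int main(void)
{
        unsigned long offset = 0x100;           /* node start, in pages (made up) */
        unsigned long start = 2, end = 6;       /* range within the bo, in pages */

        unsigned long long byte_off = (unsigned long long)(offset + start) << PAGE_SHIFT;
        unsigned long long byte_len = (unsigned long long)(end - start) << PAGE_SHIFT;

        printf("unmap %llu bytes at offset %#llx\n", byte_len, byte_off);
        return 0;
}
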
207 * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
210 * This function registers a dirty-tracking user to a buffer object.
211 * A user can be for example a resource or a vma in a special user-space
214 * Return: Zero on success, -ENOMEM on memory allocation failure.
218 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_add()
219 pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size); in vmw_bo_dirty_add()
224 kref_get(&dirty->ref_count); in vmw_bo_dirty_add()
231 ret = -ENOMEM; in vmw_bo_dirty_add()
235 dirty->bitmap_size = num_pages; in vmw_bo_dirty_add()
236 dirty->start = dirty->bitmap_size; in vmw_bo_dirty_add()
237 dirty->end = 0; in vmw_bo_dirty_add()
238 kref_init(&dirty->ref_count); in vmw_bo_dirty_add()
240 dirty->method = VMW_BO_DIRTY_PAGETABLE; in vmw_bo_dirty_add()
242 struct address_space *mapping = vbo->tbo.bdev->dev_mapping; in vmw_bo_dirty_add()
243 pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node); in vmw_bo_dirty_add()
245 dirty->method = VMW_BO_DIRTY_MKWRITE; in vmw_bo_dirty_add()
247 /* Write-protect and then pick up already dirty bits */ in vmw_bo_dirty_add()
251 &dirty->bitmap[0], in vmw_bo_dirty_add()
252 &dirty->start, &dirty->end); in vmw_bo_dirty_add()
255 vbo->dirty = dirty; in vmw_bo_dirty_add()
264 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
267 * This function releases a dirty-tracking user from a buffer object.
268 * If the reference count reaches zero, then the dirty-tracking object is
271 * The function returns nothing; dropping a reference cannot fail. in vmw_bo_dirty_release()
275 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_release()
277 if (dirty && kref_put(&dirty->ref_count, (void *)kvfree)) in vmw_bo_dirty_release()
278 vbo->dirty = NULL; in vmw_bo_dirty_release()
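
vmw_bo_dirty_add() allocates the tracking structure lazily, sizes it so the page bitmap trails the fixed fields, starts the span out empty, and reference-counts it; vmw_bo_dirty_release() then frees it with kvfree() when the last user drops its reference. A simplified userspace sketch of that pattern, with a plain counter standing in for the kref and hypothetical names throughout:

#include <errno.h>
#include <stdlib.h>

struct bo_dirty {
        unsigned int ref_count;         /* stands in for the kref */
        size_t start, end;              /* coarse dirty span, initially empty */
        size_t bitmap_size;             /* one entry per page */
        unsigned char bitmap[];         /* trailing per-page dirty state */
};

/* Add a dirty-tracking user; allocate on first use, otherwise take a ref. */
static int dirty_add(struct bo_dirty **slot, size_t num_pages)
{
        struct bo_dirty *d = *slot;

        if (d) {
                d->ref_count++;
                return 0;
        }

        d = calloc(1, sizeof(*d) + num_pages);
        if (!d)
                return -ENOMEM;

        d->ref_count = 1;
        d->bitmap_size = num_pages;
        d->start = num_pages;           /* empty span: start == size, end == 0 */
        d->end = 0;
        *slot = d;
        return 0;
}

/* Drop a user; free the structure when the last reference goes away. */
static void dirty_release(struct bo_dirty **slot)
{
        struct bo_dirty *d = *slot;

        if (d && --d->ref_count == 0) {
                free(d);
                *slot = NULL;
        }
}

int main(void)
{
        struct bo_dirty *dirty = NULL;

        dirty_add(&dirty, 64);
        dirty_add(&dirty, 64);          /* second user just takes a reference */
        dirty_release(&dirty);
        dirty_release(&dirty);          /* last put frees and clears the pointer */
        return 0;
}
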
282 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
293 struct vmw_bo *vbo = res->guest_memory_bo; in vmw_bo_dirty_transfer_to_res()
294 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_transfer_to_res()
295 pgoff_t start, cur, end; in vmw_bo_dirty_transfer_to_res() local
296 unsigned long res_start = res->guest_memory_offset; in vmw_bo_dirty_transfer_to_res()
297 unsigned long res_end = res->guest_memory_offset + res->guest_memory_size; in vmw_bo_dirty_transfer_to_res()
303 if (res_start >= dirty->end || res_end <= dirty->start) in vmw_bo_dirty_transfer_to_res()
306 cur = max(res_start, dirty->start); in vmw_bo_dirty_transfer_to_res()
307 res_end = max(res_end, dirty->end); in vmw_bo_dirty_transfer_to_res()
308 while (cur < res_end) { in vmw_bo_dirty_transfer_to_res()
311 start = find_next_bit(&dirty->bitmap[0], res_end, cur); in vmw_bo_dirty_transfer_to_res()
315 end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1); in vmw_bo_dirty_transfer_to_res()
316 cur = end + 1; in vmw_bo_dirty_transfer_to_res()
317 num = end - start; in vmw_bo_dirty_transfer_to_res()
318 bitmap_clear(&dirty->bitmap[0], start, num); in vmw_bo_dirty_transfer_to_res()
322 if (res_start <= dirty->start && res_end > dirty->start) in vmw_bo_dirty_transfer_to_res()
323 dirty->start = res_end; in vmw_bo_dirty_transfer_to_res()
324 if (res_start < dirty->end && res_end >= dirty->end) in vmw_bo_dirty_transfer_to_res()
325 dirty->end = res_start; in vmw_bo_dirty_transfer_to_res()
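
The transfer loop above walks the bitmap with find_next_bit()/find_next_zero_bit() to pull out one contiguous dirty run per iteration, clears that run from the buffer's bitmap, and hands it to the resource. A userspace sketch of the same run-extraction loop, with a byte-per-page array and simple scan helpers standing in for the kernel bitmap API:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Byte-per-page stand-ins for find_next_bit()/find_next_zero_bit(). */
static size_t next_set(const unsigned char *map, size_t size, size_t from)
{
        while (from < size && !map[from])
                from++;
        return from;
}

static size_t next_zero(const unsigned char *map, size_t size, size_t from)
{
        while (from < size && map[from])
                from++;
        return from;
}

/* Report and clear each contiguous dirty run in [cur, limit). */
static void transfer_runs(unsigned char *map, size_t limit, size_t cur)
{
        while (cur < limit) {
                size_t start = next_set(map, limit, cur);

                if (start >= limit)
                        break;

                size_t end = next_zero(map, limit, start + 1);

                memset(map + start, 0, end - start);    /* bitmap_clear() */
                printf("dirty run: [%zu, %zu)\n", start, end);
                cur = end + 1;          /* same stride as the driver loop */
        }
}

int main(void)
{
        unsigned char map[16] = {0};

        map[2] = map[3] = map[4] = 1;
        map[9] = 1;
        transfer_runs(map, 16, 0);      /* prints [2, 5) and [9, 10) */
        return 0;
}
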
330 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_clear()
331 pgoff_t start, cur, end; in vmw_bo_dirty_clear() local
333 unsigned long res_end = vbo->tbo.base.size; in vmw_bo_dirty_clear()
339 if (res_start >= dirty->end || res_end <= dirty->start) in vmw_bo_dirty_clear()
342 cur = max(res_start, dirty->start); in vmw_bo_dirty_clear()
343 res_end = max(res_end, dirty->end); in vmw_bo_dirty_clear()
344 while (cur < res_end) { in vmw_bo_dirty_clear()
347 start = find_next_bit(&dirty->bitmap[0], res_end, cur); in vmw_bo_dirty_clear()
351 end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1); in vmw_bo_dirty_clear()
352 cur = end + 1; in vmw_bo_dirty_clear()
353 num = end - start; in vmw_bo_dirty_clear()
354 bitmap_clear(&dirty->bitmap[0], start, num); in vmw_bo_dirty_clear()
357 if (res_start <= dirty->start && res_end > dirty->start) in vmw_bo_dirty_clear()
358 dirty->start = res_end; in vmw_bo_dirty_clear()
359 if (res_start < dirty->end && res_end >= dirty->end) in vmw_bo_dirty_clear()
360 dirty->end = res_start; in vmw_bo_dirty_clear()
364 * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
373 unsigned long res_start = res->guest_memory_offset; in vmw_bo_dirty_clear_res()
374 unsigned long res_end = res->guest_memory_offset + res->guest_memory_size; in vmw_bo_dirty_clear_res()
375 struct vmw_bo *vbo = res->guest_memory_bo; in vmw_bo_dirty_clear_res()
376 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_clear_res()
381 if (res_start >= dirty->end || res_end <= dirty->start) in vmw_bo_dirty_clear_res()
384 res_start = max(res_start, dirty->start); in vmw_bo_dirty_clear_res()
385 res_end = min(res_end, dirty->end); in vmw_bo_dirty_clear_res()
386 bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start); in vmw_bo_dirty_clear_res()
388 if (res_start <= dirty->start && res_end > dirty->start) in vmw_bo_dirty_clear_res()
389 dirty->start = res_end; in vmw_bo_dirty_clear_res()
390 if (res_start < dirty->end && res_end >= dirty->end) in vmw_bo_dirty_clear_res()
391 dirty->end = res_start; in vmw_bo_dirty_clear_res()
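
After clearing a range, the functions above shrink the buffer-wide start/end span only when the cleared range actually covers one of its edges; a hole punched in the middle leaves the span untouched because set bits may remain on both sides. A small sketch of that trimming rule, with hypothetical names:

#include <stdio.h>

struct span { unsigned long start, end; };

/* Trim the cached dirty span after clearing [res_start, res_end). */
static void trim_span(struct span *s, unsigned long res_start,
                      unsigned long res_end)
{
        if (res_start <= s->start && res_end > s->start)
                s->start = res_end;     /* cleared range covers the left edge */
        if (res_start < s->end && res_end >= s->end)
                s->end = res_start;     /* cleared range covers the right edge */
}

int main(void)
{
        struct span s = { .start = 4, .end = 20 };

        trim_span(&s, 2, 8);            /* left edge cleared: span becomes [8, 20) */
        printf("[%lu, %lu)\n", s.start, s.end);
        trim_span(&s, 10, 12);          /* middle cleared: span unchanged */
        printf("[%lu, %lu)\n", s.start, s.end);
        return 0;
}
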
396 struct vm_area_struct *vma = vmf->vma; in vmw_bo_vm_mkwrite()
398 vma->vm_private_data; in vmw_bo_vm_mkwrite()
402 struct vmw_bo *vbo = to_vmw_bo(&bo->base); in vmw_bo_vm_mkwrite()
408 save_flags = vmf->flags; in vmw_bo_vm_mkwrite()
409 vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY; in vmw_bo_vm_mkwrite()
411 vmf->flags = save_flags; in vmw_bo_vm_mkwrite()
415 page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); in vmw_bo_vm_mkwrite()
416 if (unlikely(page_offset >= PFN_UP(bo->resource->size))) { in vmw_bo_vm_mkwrite()
421 if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE && in vmw_bo_vm_mkwrite()
422 !test_bit(page_offset, &vbo->dirty->bitmap[0])) { in vmw_bo_vm_mkwrite()
423 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_vm_mkwrite()
425 __set_bit(page_offset, &dirty->bitmap[0]); in vmw_bo_vm_mkwrite()
426 dirty->start = min(dirty->start, page_offset); in vmw_bo_vm_mkwrite()
427 dirty->end = max(dirty->end, page_offset + 1); in vmw_bo_vm_mkwrite()
431 dma_resv_unlock(bo->base.resv); in vmw_bo_vm_mkwrite()
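
In the mkwrite handler above, only a page whose bit is not yet set costs any work: the bit is set and the coarse span widened with min()/max(); a second write to the same page finds the bit already set and falls straight through. A sketch of that fast path, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct mk_dirty {
        unsigned long start, end, num_pages;
        unsigned char bitmap[64];       /* byte per page for simplicity */
};

/* Record a write fault on 'page'; returns true only for the first write. */
static bool note_write(struct mk_dirty *d, unsigned long page)
{
        if (page >= d->num_pages || d->bitmap[page])
                return false;           /* out of range or already recorded */

        d->bitmap[page] = 1;            /* __set_bit() analogue */
        if (page < d->start)            /* dirty->start = min(...) */
                d->start = page;
        if (page + 1 > d->end)          /* dirty->end = max(...) */
                d->end = page + 1;
        return true;
}

int main(void)
{
        struct mk_dirty d = { .start = 64, .end = 0, .num_pages = 64 };

        printf("%d\n", note_write(&d, 7));      /* 1: first write is recorded */
        printf("%d\n", note_write(&d, 7));      /* 0: already dirty, no work */
        return 0;
}
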
437 struct vm_area_struct *vma = vmf->vma; in vmw_bo_vm_fault()
439 vma->vm_private_data; in vmw_bo_vm_fault()
440 struct vmw_bo *vbo = to_vmw_bo(&bo->base); in vmw_bo_vm_fault()
449 num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 : in vmw_bo_vm_fault()
452 if (vbo->dirty) { in vmw_bo_vm_fault()
456 page_offset = vmf->pgoff - in vmw_bo_vm_fault()
457 drm_vma_node_start(&bo->base.vma_node); in vmw_bo_vm_fault()
458 if (page_offset >= PFN_UP(bo->resource->size) || in vmw_bo_vm_fault()
471 * sure the page protection is write-enabled so we don't get in vmw_bo_vm_fault()
474 if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE) in vmw_bo_vm_fault()
475 prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); in vmw_bo_vm_fault()
477 prot = vm_get_page_prot(vma->vm_flags); in vmw_bo_vm_fault()
480 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in vmw_bo_vm_fault()
484 dma_resv_unlock(bo->base.resv); in vmw_bo_vm_fault()
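
The fault handler finishes by picking a read-only page protection for mkwrite-tracked buffers (vm_flags with VM_SHARED masked off), so the next write to a prefaulted page still goes through vmw_bo_vm_mkwrite(). The same write-protect-then-record idea can be demonstrated entirely in userspace with mprotect() and a SIGSEGV handler; this is only an analogy to the mechanism, not the driver's code, and all names are made up:

#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define NUM_PAGES 4

static unsigned char *region;
static long page_size;
static volatile sig_atomic_t dirty[NUM_PAGES];

/* First write to a read-only page lands here; record it and allow writes. */
static void on_write_fault(int sig, siginfo_t *si, void *ctx)
{
        unsigned char *addr = si->si_addr;
        long page = (addr - region) / page_size;

        (void)sig; (void)ctx;
        if (page < 0 || page >= NUM_PAGES)
                _exit(1);               /* a real crash, not our tracked region */

        dirty[page] = 1;                /* the "mkwrite" bookkeeping */
        mprotect(region + page * page_size, page_size, PROT_READ | PROT_WRITE);
}

int main(void)
{
        struct sigaction sa = { .sa_sigaction = on_write_fault,
                                .sa_flags = SA_SIGINFO };

        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        page_size = sysconf(_SC_PAGESIZE);
        region = mmap(NULL, NUM_PAGES * page_size, PROT_READ,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (region == MAP_FAILED)
                return 1;

        region[1 * page_size] = 0xff;   /* faults once, then the write proceeds */
        region[3 * page_size] = 0xff;

        for (int i = 0; i < NUM_PAGES; i++)
                printf("page %d dirty: %d\n", i, (int)dirty[i]);
        return 0;
}
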