// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */

#include <linux/interval_tree.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>

#include "vfio_dev.h"
#include "cmds.h"
#include "dirty.h"

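/*
 * Values passed as the read_seq argument to pds_vfio_dirty_seq_ack() to
 * select between reading the device's sequence bitmap and writing back the
 * host's acknowledge bitmap.
 */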
#define READ_SEQ true
#define WRITE_ACK false

bool pds_vfio_dirty_is_enabled(struct pds_vfio_pci_device *pds_vfio)
{
	return pds_vfio->dirty.is_enabled;
}

void pds_vfio_dirty_set_enabled(struct pds_vfio_pci_device *pds_vfio)
{
	pds_vfio->dirty.is_enabled = true;
}

void pds_vfio_dirty_set_disabled(struct pds_vfio_pci_device *pds_vfio)
{
	pds_vfio->dirty.is_enabled = false;
}

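/*
 * Query the device for its dirty region layout and emit each region's DMA
 * base, page count, and page size as debug output. Failures are ignored
 * since this is purely informational.
 */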
static void
pds_vfio_print_guest_region_info(struct pds_vfio_pci_device *pds_vfio,
				 u8 max_regions)
{
	int len = max_regions * sizeof(struct pds_lm_dirty_region_info);
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	struct pds_lm_dirty_region_info *region_info;
	dma_addr_t regions_dma;
	u8 num_regions;
	int err;

	region_info = kcalloc(max_regions,
			      sizeof(struct pds_lm_dirty_region_info),
			      GFP_KERNEL);
	if (!region_info)
		return;

	regions_dma =
		dma_map_single(pdsc_dev, region_info, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdsc_dev, regions_dma))
		goto out_free_region_info;

	err = pds_vfio_dirty_status_cmd(pds_vfio, regions_dma, &max_regions,
					&num_regions);
	dma_unmap_single(pdsc_dev, regions_dma, len, DMA_FROM_DEVICE);
	if (err)
		goto out_free_region_info;

	for (unsigned int i = 0; i < num_regions; i++)
		dev_dbg(&pdev->dev,
			"region_info[%d]: dma_base 0x%llx page_count %u page_size_log2 %u\n",
			i, le64_to_cpu(region_info[i].dma_base),
			le32_to_cpu(region_info[i].page_count),
			region_info[i].page_size_log2);

out_free_region_info:
	kfree(region_info);
}

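/*
 * Allocate the host-side "seq" and "ack" bitmaps for a region. Each bit
 * represents one tracked page; bytes is the size of a single bitmap.
 */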
static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_region *region,
					unsigned long bytes)
{
	unsigned long *host_seq_bmp, *host_ack_bmp;

	host_seq_bmp = vzalloc(bytes);
	if (!host_seq_bmp)
		return -ENOMEM;

	host_ack_bmp = vzalloc(bytes);
	if (!host_ack_bmp) {
		vfree(host_seq_bmp);
		return -ENOMEM;
	}

	region->host_seq = host_seq_bmp;
	region->host_ack = host_ack_bmp;
	region->bmp_bytes = bytes;

	return 0;
}

static void pds_vfio_dirty_free_bitmaps(struct pds_vfio_dirty *dirty)
{
	if (!dirty->regions)
		return;

	for (int i = 0; i < dirty->num_regions; i++) {
		struct pds_vfio_region *region = &dirty->regions[i];

		vfree(region->host_seq);
		vfree(region->host_ack);
		region->host_seq = NULL;
		region->host_ack = NULL;
		region->bmp_bytes = 0;
	}
}

static void __pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio,
				      struct pds_vfio_region *region)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;

	dma_unmap_single(pdsc_dev, region->sgl_addr,
			 region->num_sge * sizeof(struct pds_lm_sg_elem),
			 DMA_BIDIRECTIONAL);
	kfree(region->sgl);

	region->num_sge = 0;
	region->sgl = NULL;
	region->sgl_addr = 0;
}

static void pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio)
{
	struct pds_vfio_dirty *dirty = &pds_vfio->dirty;

	if (!dirty->regions)
		return;

	for (int i = 0; i < dirty->num_regions; i++) {
		struct pds_vfio_region *region = &dirty->regions[i];

		if (region->sgl)
			__pds_vfio_dirty_free_sgl(pds_vfio, region);
	}
}

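/*
 * Allocate and DMA map the scatter-gather list used to transfer a region's
 * bitmap to/from the device. It is sized for the worst case of one SG
 * element per page of bitmap memory: each bitmap page holds PAGE_SIZE * 8
 * bits and therefore tracks PAGE_SIZE * 8 pages of guest memory.
 */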
static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
				    struct pds_vfio_region *region,
				    u32 page_count)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	struct pds_lm_sg_elem *sgl;
	dma_addr_t sgl_addr;
	size_t sgl_size;
	u32 max_sge;

	max_sge = DIV_ROUND_UP(page_count, PAGE_SIZE * 8);
	sgl_size = max_sge * sizeof(struct pds_lm_sg_elem);

	sgl = kzalloc(sgl_size, GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;

	sgl_addr = dma_map_single(pdsc_dev, sgl, sgl_size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pdsc_dev, sgl_addr)) {
		kfree(sgl);
		return -EIO;
	}

	region->sgl = sgl;
	region->num_sge = max_sge;
	region->sgl_addr = sgl_addr;

	return 0;
}

static void pds_vfio_dirty_free_regions(struct pds_vfio_dirty *dirty)
{
	vfree(dirty->regions);
	dirty->regions = NULL;
	dirty->num_regions = 0;
}

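/*
 * Build the driver's per-region tracking state from the region_info the
 * device accepted: the host seq/ack bitmaps, the SG list used for bitmap
 * transfers, and each region's byte offset into the device's single
 * logical dirty bitmap.
 */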
static int pds_vfio_dirty_alloc_regions(struct pds_vfio_pci_device *pds_vfio,
					struct pds_lm_dirty_region_info *region_info,
					u64 region_page_size, u8 num_regions)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
	u32 dev_bmp_offset_byte = 0;
	int err;

	dirty->regions = vcalloc(num_regions, sizeof(struct pds_vfio_region));
	if (!dirty->regions)
		return -ENOMEM;
	dirty->num_regions = num_regions;

	for (int i = 0; i < num_regions; i++) {
		struct pds_lm_dirty_region_info *ri = &region_info[i];
		struct pds_vfio_region *region = &dirty->regions[i];
		u64 region_size, region_start;
		u32 page_count;

		/* page_count might be adjusted by the device */
		page_count = le32_to_cpu(ri->page_count);
		region_start = le64_to_cpu(ri->dma_base);
		region_size = page_count * region_page_size;

		err = pds_vfio_dirty_alloc_bitmaps(region,
						   page_count / BITS_PER_BYTE);
		if (err) {
			dev_err(&pdev->dev, "Failed to alloc dirty bitmaps: %pe\n",
				ERR_PTR(err));
			goto out_free_regions;
		}

		err = pds_vfio_dirty_alloc_sgl(pds_vfio, region, page_count);
		if (err) {
			dev_err(&pdev->dev, "Failed to alloc dirty sg lists: %pe\n",
				ERR_PTR(err));
			goto out_free_regions;
		}

		region->size = region_size;
		region->start = region_start;
		region->page_size = region_page_size;
		region->dev_bmp_offset_start_byte = dev_bmp_offset_byte;

		dev_bmp_offset_byte += page_count / BITS_PER_BYTE;
		if (dev_bmp_offset_byte % BITS_PER_BYTE) {
			dev_err(&pdev->dev, "Device bitmap offset is mis-aligned\n");
			err = -EINVAL;
			goto out_free_regions;
		}
	}

	return 0;

out_free_regions:
	pds_vfio_dirty_free_bitmaps(dirty);
	pds_vfio_dirty_free_sgl(pds_vfio);
	pds_vfio_dirty_free_regions(dirty);

	return err;
}

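/*
 * Enable dirty page tracking on the device:
 *  - verify tracking is supported and not already enabled via the status cmd
 *  - collapse the requested IOVA ranges down to at most max_regions regions
 *  - describe each region (DMA base, page count, page size) to the device
 *  - allocate the host-side bitmaps and SG lists used for later syncs
 */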
static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
				 struct rb_root_cached *ranges, u32 nnodes,
				 u64 *page_size)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	struct pds_lm_dirty_region_info *region_info;
	struct interval_tree_node *node = NULL;
	u64 region_page_size = *page_size;
	u8 max_regions = 0, num_regions;
	dma_addr_t regions_dma = 0;
	u32 num_ranges = nnodes;
	int err;
	u16 len;

	dev_dbg(&pdev->dev, "vf%u: Start dirty page tracking\n",
		pds_vfio->vf_id);

	if (pds_vfio_dirty_is_enabled(pds_vfio))
		return -EINVAL;

	/* Check that dirty tracking is currently disabled, i.e. num_regions == 0 */
	err = pds_vfio_dirty_status_cmd(pds_vfio, 0, &max_regions,
					&num_regions);
	if (err < 0) {
		dev_err(&pdev->dev, "Failed to get dirty status, err %pe\n",
			ERR_PTR(err));
		return err;
	} else if (num_regions) {
		dev_err(&pdev->dev,
			"Dirty tracking already enabled for %d regions\n",
			num_regions);
		return -EEXIST;
	} else if (!max_regions) {
		dev_err(&pdev->dev,
			"Device doesn't support dirty tracking, max_regions %d\n",
			max_regions);
		return -EOPNOTSUPP;
	}

	if (num_ranges > max_regions) {
		vfio_combine_iova_ranges(ranges, nnodes, max_regions);
		num_ranges = max_regions;
	}

	region_info = kcalloc(num_ranges, sizeof(*region_info), GFP_KERNEL);
	if (!region_info)
		return -ENOMEM;
	len = num_ranges * sizeof(*region_info);

	node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
	if (!node) {
		err = -EINVAL;
		goto out_free_region_info;
	}

	for (int i = 0; i < num_ranges; i++) {
		struct pds_lm_dirty_region_info *ri = &region_info[i];
		u64 region_size = node->last - node->start + 1;
		u64 region_start = node->start;
		u32 page_count;

		page_count = DIV_ROUND_UP(region_size, region_page_size);

		ri->dma_base = cpu_to_le64(region_start);
		ri->page_count = cpu_to_le32(page_count);
		ri->page_size_log2 = ilog2(region_page_size);

		dev_dbg(&pdev->dev,
			"region_info[%d]: region_start 0x%llx region_end 0x%lx region_size 0x%llx page_count %u page_size %llu\n",
			i, region_start, node->last, region_size, page_count,
			region_page_size);

		node = interval_tree_iter_next(node, 0, ULONG_MAX);
	}

	regions_dma = dma_map_single(pdsc_dev, (void *)region_info, len,
				     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pdsc_dev, regions_dma)) {
		err = -ENOMEM;
		goto out_free_region_info;
	}

	err = pds_vfio_dirty_enable_cmd(pds_vfio, regions_dma, num_ranges);
	dma_unmap_single(pdsc_dev, regions_dma, len, DMA_BIDIRECTIONAL);
	if (err)
		goto out_free_region_info;

	err = pds_vfio_dirty_alloc_regions(pds_vfio, region_info,
					   region_page_size, num_ranges);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to allocate %d regions for tracking dirty regions: %pe\n",
			num_ranges, ERR_PTR(err));
		goto out_dirty_disable;
	}

	pds_vfio_dirty_set_enabled(pds_vfio);

	pds_vfio_print_guest_region_info(pds_vfio, max_regions);

	kfree(region_info);

	return 0;

out_dirty_disable:
	pds_vfio_dirty_disable_cmd(pds_vfio);
out_free_region_info:
	kfree(region_info);
	return err;
}

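/*
 * Disable dirty page tracking and free all host-side tracking state. When
 * send_cmd is true the device is also told to stop tracking and the live
 * migration status is reset to PDS_LM_STA_NONE.
 */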
void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio, bool send_cmd)
{
	if (pds_vfio_dirty_is_enabled(pds_vfio)) {
		pds_vfio_dirty_set_disabled(pds_vfio);
		if (send_cmd)
			pds_vfio_dirty_disable_cmd(pds_vfio);
		pds_vfio_dirty_free_sgl(pds_vfio);
		pds_vfio_dirty_free_bitmaps(&pds_vfio->dirty);
		pds_vfio_dirty_free_regions(&pds_vfio->dirty);
	}

	if (send_cmd)
		pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_NONE);
}

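/*
 * Transfer a section of a host bitmap to or from the device. The bitmap is
 * vmalloc'ed, so it is walked page by page and described to the device
 * through the region's SG list; read_seq selects the direction (read the
 * device's seq bitmap vs. write back the host's ack bitmap).
 */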
static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
				  struct pds_vfio_region *region,
				  unsigned long *seq_ack_bmp, u32 offset,
				  u32 bmp_bytes, bool read_seq)
{
	const char *bmp_type_str = read_seq ? "read_seq" : "write_ack";
	u8 dma_dir = read_seq ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	unsigned long long npages;
	struct sg_table sg_table;
	struct scatterlist *sg;
	struct page **pages;
	u32 page_offset;
	const void *bmp;
	size_t size;
	u16 num_sge;
	int err;
	int i;

	bmp = (void *)((u64)seq_ack_bmp + offset);
	page_offset = offset_in_page(bmp);
	bmp -= page_offset;

	/*
	 * The start and end of the bitmap section to seq/ack might not be
	 * page aligned, so include page_offset in the calculation to make
	 * sure there are enough pages to cover bmp_bytes.
	 */
	npages = DIV_ROUND_UP_ULL(bmp_bytes + page_offset, PAGE_SIZE);
	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (unsigned long long i = 0; i < npages; i++) {
		struct page *page = vmalloc_to_page(bmp);

		if (!page) {
			err = -EFAULT;
			goto out_free_pages;
		}

		pages[i] = page;
		bmp += PAGE_SIZE;
	}

	err = sg_alloc_table_from_pages(&sg_table, pages, npages, page_offset,
					bmp_bytes, GFP_KERNEL);
	if (err)
		goto out_free_pages;

	err = dma_map_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
	if (err)
		goto out_free_sg_table;

	for_each_sgtable_dma_sg(&sg_table, sg, i) {
		struct pds_lm_sg_elem *sg_elem = &region->sgl[i];

		sg_elem->addr = cpu_to_le64(sg_dma_address(sg));
		sg_elem->len = cpu_to_le32(sg_dma_len(sg));
	}

	num_sge = sg_table.nents;
	size = num_sge * sizeof(struct pds_lm_sg_elem);
	offset += region->dev_bmp_offset_start_byte;
	dma_sync_single_for_device(pdsc_dev, region->sgl_addr, size, dma_dir);
	err = pds_vfio_dirty_seq_ack_cmd(pds_vfio, region->sgl_addr, num_sge,
					 offset, bmp_bytes, read_seq);
	if (err)
		dev_err(&pdev->dev,
			"Dirty bitmap %s failed offset %u bmp_bytes %u num_sge %u DMA 0x%llx: %pe\n",
			bmp_type_str, offset, bmp_bytes,
			num_sge, region->sgl_addr, ERR_PTR(err));
	dma_sync_single_for_cpu(pdsc_dev, region->sgl_addr, size, dma_dir);

	dma_unmap_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
out_free_sg_table:
	sg_free_table(&sg_table);
out_free_pages:
	kfree(pages);

	return err;
}

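/* Thin wrappers that select the host bitmap and transfer direction. */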
static int pds_vfio_dirty_write_ack(struct pds_vfio_pci_device *pds_vfio,
				    struct pds_vfio_region *region,
				    u32 offset, u32 len)
{
	return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_ack,
				      offset, len, WRITE_ACK);
}

static int pds_vfio_dirty_read_seq(struct pds_vfio_pci_device *pds_vfio,
				   struct pds_vfio_region *region,
				   u32 offset, u32 len)
{
	return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_seq,
				      offset, len, READ_SEQ);
}

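/*
 * Pages dirtied since the last sync are those whose seq and ack bits
 * differ. XOR the two bitmaps 64 bits at a time, report each set bit into
 * the iova_bitmap at its IOVA, and copy seq into ack so the next write_ack
 * acknowledges exactly what was just reported.
 */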
static int pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
					  struct pds_vfio_region *region,
					  struct iova_bitmap *dirty_bitmap,
					  u32 bmp_offset, u32 len_bytes)
{
	u64 page_size = region->page_size;
	u64 region_start = region->start;
	u32 bmp_offset_bit;
	__le64 *seq, *ack;
	int dword_count;

	dword_count = len_bytes / sizeof(u64);
	seq = (__le64 *)((u64)region->host_seq + bmp_offset);
	ack = (__le64 *)((u64)region->host_ack + bmp_offset);
	bmp_offset_bit = bmp_offset * 8;

	for (int i = 0; i < dword_count; i++) {
		u64 xor = le64_to_cpu(seq[i]) ^ le64_to_cpu(ack[i]);

		/* prepare for next write_ack call */
		ack[i] = seq[i];

		for (u8 bit_i = 0; bit_i < BITS_PER_TYPE(u64); ++bit_i) {
			if (xor & BIT(bit_i)) {
				u64 abs_bit_i = bmp_offset_bit +
						i * BITS_PER_TYPE(u64) + bit_i;
				u64 addr = abs_bit_i * page_size + region_start;

				iova_bitmap_set(dirty_bitmap, addr, page_size);
			}
		}
	}

	return 0;
}

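/* Find the tracked region containing the given IOVA, if any. */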
static struct pds_vfio_region *
pds_vfio_get_region(struct pds_vfio_pci_device *pds_vfio, unsigned long iova)
{
	struct pds_vfio_dirty *dirty = &pds_vfio->dirty;

	for (int i = 0; i < dirty->num_regions; i++) {
		struct pds_vfio_region *region = &dirty->regions[i];

		if (iova >= region->start &&
		    iova < (region->start + region->size))
			return region;
	}

	return NULL;
}

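/*
 * Sync one [iova, iova + length) window with the device: validate that the
 * window fits within a tracked region, read the device's seq bitmap for
 * that window, report newly dirtied pages into dirty_bitmap, then write
 * the ack bitmap back to acknowledge them to the device.
 */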
static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
			       struct iova_bitmap *dirty_bitmap,
			       unsigned long iova, unsigned long length)
{
	struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
	struct pds_vfio_region *region;
	u64 bmp_offset, bmp_bytes;
	u64 bitmap_size, pages;
	int err;

	dev_dbg(dev, "vf%u: Get dirty page bitmap\n", pds_vfio->vf_id);

	if (!pds_vfio_dirty_is_enabled(pds_vfio)) {
		dev_err(dev, "vf%u: Sync failed, dirty tracking is disabled\n",
			pds_vfio->vf_id);
		return -EINVAL;
	}

	region = pds_vfio_get_region(pds_vfio, iova);
	if (!region) {
		dev_err(dev, "vf%u: Failed to find region that contains iova 0x%lx length 0x%lx\n",
			pds_vfio->vf_id, iova, length);
		return -EINVAL;
	}

	pages = DIV_ROUND_UP(length, region->page_size);
	bitmap_size =
		round_up(pages, sizeof(u64) * BITS_PER_BYTE) / BITS_PER_BYTE;

	dev_dbg(dev,
		"vf%u: iova 0x%lx length %lu page_size %llu pages %llu bitmap_size %llu\n",
		pds_vfio->vf_id, iova, length, region->page_size,
		pages, bitmap_size);

	if (!length || ((iova - region->start + length) > region->size)) {
		dev_err(dev, "Invalid iova 0x%lx and/or length 0x%lx to sync\n",
			iova, length);
		return -EINVAL;
	}

	/* bitmap is modified in 64 bit chunks */
	bmp_bytes = ALIGN(DIV_ROUND_UP(length / region->page_size,
				       sizeof(u64)), sizeof(u64));
	if (bmp_bytes != bitmap_size) {
		dev_err(dev,
			"Calculated bitmap bytes %llu not equal to bitmap size %llu\n",
			bmp_bytes, bitmap_size);
		return -EINVAL;
	}

	if (bmp_bytes > region->bmp_bytes) {
		dev_err(dev,
			"Calculated bitmap bytes %llu larger than region's cached bmp_bytes %llu\n",
			bmp_bytes, region->bmp_bytes);
		return -EINVAL;
	}

	bmp_offset = DIV_ROUND_UP((iova - region->start) /
				  region->page_size, sizeof(u64));

	dev_dbg(dev,
		"Syncing dirty bitmap, iova 0x%lx length 0x%lx, bmp_offset %llu bmp_bytes %llu\n",
		iova, length, bmp_offset, bmp_bytes);

	err = pds_vfio_dirty_read_seq(pds_vfio, region, bmp_offset, bmp_bytes);
	if (err)
		return err;

	err = pds_vfio_dirty_process_bitmaps(pds_vfio, region, dirty_bitmap,
					     bmp_offset, bmp_bytes);
	if (err)
		return err;

	err = pds_vfio_dirty_write_ack(pds_vfio, region, bmp_offset, bmp_bytes);
	if (err)
		return err;

	return 0;
}

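/*
 * VFIO dirty-logging entry points. These take the device's state_mutex and
 * drive the enable/sync/disable paths above. They are wired up through a
 * struct vfio_log_ops in the driver's vfio_dev.c, roughly as below (a
 * sketch, not a verbatim copy of that file):
 *
 *	static const struct vfio_log_ops pds_vfio_log_ops = {
 *		.log_start = pds_vfio_dma_logging_start,
 *		.log_stop = pds_vfio_dma_logging_stop,
 *		.log_read_and_clear = pds_vfio_dma_logging_report,
 *	};
 */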
int pds_vfio_dma_logging_report(struct vfio_device *vdev, unsigned long iova,
				unsigned long length, struct iova_bitmap *dirty)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	int err;

	mutex_lock(&pds_vfio->state_mutex);
	err = pds_vfio_dirty_sync(pds_vfio, dirty, iova, length);
	mutex_unlock(&pds_vfio->state_mutex);

	return err;
}

int pds_vfio_dma_logging_start(struct vfio_device *vdev,
			       struct rb_root_cached *ranges, u32 nnodes,
			       u64 *page_size)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	int err;

	mutex_lock(&pds_vfio->state_mutex);
	pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_IN_PROGRESS);
	err = pds_vfio_dirty_enable(pds_vfio, ranges, nnodes, page_size);
	mutex_unlock(&pds_vfio->state_mutex);

	return err;
}

int pds_vfio_dma_logging_stop(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);

	mutex_lock(&pds_vfio->state_mutex);
	pds_vfio_dirty_disable(pds_vfio, true);
	mutex_unlock(&pds_vfio->state_mutex);

	return 0;
}