pgtable.c (eadcbfa58ae8693f0d6a0f591d8f51d55cf068e1) -> pgtable.c (1aea9b3f921003f0880f0676ae85d87c9f1cb4a2)
/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>

--- 596 unchanged lines hidden ---

/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
+	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	pgd = pgd_offset(mm, addr);
-	pud = pud_alloc(mm, pgd, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return false;
+	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return false;
	/* We can't run guests backed by huge pages, but userspace can
	 * still set them up and then try to migrate them without any
	 * migration support.

--- 314 unchanged lines hidden ---
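
The hunk above has the usual shape of a 4-level to 5-level page-table conversion: a p4d step is inserted between the pgd and the pud, and the pud lookup is keyed off the new p4d entry instead of the pgd. Below is a minimal sketch of the converted walk in isolation, using only the helpers that appear in the hunk (pgd_offset, p4d_alloc, pud_alloc, pmd_alloc); the wrapper name walk_to_pmd is invented for illustration and is not part of the kernel.

#include <linux/mm.h>

/* Illustrative sketch only: mirrors the walk in the new version of
 * test_and_clear_guest_dirty() above.  On configurations without a real
 * fifth paging level the p4d level is folded, so the extra step compiles
 * down to essentially nothing.
 */
static bool walk_to_pmd(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);	/* top level: unchanged */
	p4d = p4d_alloc(mm, pgd, addr);	/* new intermediate level */
	if (!p4d)
		return false;
	pud = pud_alloc(mm, p4d, addr);	/* now takes the p4d, not the pgd */
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);	/* unchanged from the 4-level walk */
	if (!pmd)
		return false;
	*pmdp = pmd;
	return true;
}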