Diffstat (limited to 'kernel/arch/powerpc/kvm/book3s_64_mmu_hv.c')
-rw-r--r--  kernel/arch/powerpc/kvm/book3s_64_mmu_hv.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/kernel/arch/powerpc/kvm/book3s_64_mmu_hv.c b/kernel/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 1a4acf8bf..fb37290a5 100644
--- a/kernel/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/kernel/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -70,7 +70,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
}
/* Lastly try successively smaller sizes from the page allocator */
- while (!hpt && order > PPC_MIN_HPT_ORDER) {
+ /* Only do this if userspace didn't specify a size via ioctl */
+ while (!hpt && order > PPC_MIN_HPT_ORDER && !htab_orderp) {
hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
__GFP_NOWARN, order - PAGE_SHIFT);
if (!hpt)
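The new !htab_orderp guard means the fall-back loop over successively smaller allocation orders only runs when userspace did not pin the HPT size via the ioctl. Below is a minimal, self-contained sketch of that fallback pattern; try_alloc, MIN_ORDER and alloc_with_fallback are hypothetical stand-ins, not kernel symbols.

#include <stdlib.h>

#define MIN_ORDER 3	/* stand-in for the smallest acceptable order */

/* Hypothetical allocator used only for illustration. */
static void *try_alloc(unsigned int order)
{
	return calloc(1, 1ul << order);
}

/* Fall back to smaller orders only when the caller did not request a
 * specific size (requested_order == NULL), mirroring the hunk above. */
static void *alloc_with_fallback(unsigned int order,
				 const unsigned int *requested_order)
{
	void *p = NULL;

	while (!p && order > MIN_ORDER && !requested_order) {
		p = try_alloc(order);
		if (!p)
			order--;
	}
	return p;
}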
@@ -543,7 +544,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(current->mm->pgd,
- hva, NULL);
+ hva, NULL, NULL);
if (ptep) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
if (pte_write(pte))
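The extra NULL reflects find_linux_pte_or_hugepte() having grown an additional optional out-parameter; call sites that do not need the extra information just pass NULL. A generic illustration of that optional out-parameter convention follows; all names are hypothetical.

#include <stddef.h>

/* A lookup that can optionally report the mapping's page shift; callers
 * that only want the result pass NULL, as the updated call site does. */
static unsigned long lookup_pte(unsigned long ea, unsigned int *shift_out)
{
	if (shift_out)
		*shift_out = 12;	/* fill only when the caller asked */
	return ea & ~0xffful;		/* dummy "pte" for illustration */
}

int main(void)
{
	unsigned int shift;
	unsigned long a = lookup_pte(0x12345678, NULL);		/* don't care */
	unsigned long b = lookup_pte(0x12345678, &shift);	/* need shift */

	return (a == b && shift == 12) ? 0 : 1;
}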
@@ -650,7 +651,7 @@ static void kvmppc_rmap_reset(struct kvm *kvm)
int srcu_idx;
srcu_idx = srcu_read_lock(&kvm->srcu);
- slots = kvm->memslots;
+ slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
/*
* This assumes it is acceptable to lose reference and
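Switching from the raw kvm->memslots field to the kvm_memslots() accessor keeps the dereference behind the helper that is meant to be used under srcu_read_lock(). A simplified sketch of that accessor pattern is shown below; the structures and the plain load are illustrative, not the kernel's real implementation.

struct slots { int nmemslots; };

struct vm {
	struct slots *memslots;	/* updated by writers, read under (S)RCU */
};

/* All readers go through one accessor instead of touching the field
 * directly; in the kernel this is where the srcu_dereference() lives. */
static struct slots *vm_memslots(struct vm *vm)
{
	return vm->memslots;	/* plain load here, for illustration only */
}

static int count_slots(struct vm *vm)
{
	return vm_memslots(vm)->nmemslots;
}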
@@ -761,6 +762,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
/* Harvest R and C */
rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
+ if (rcbits & HPTE_R_C)
+ kvmppc_update_rmap_change(rmapp, psize);
if (rcbits & ~rev[i].guest_rpte) {
rev[i].guest_rpte = ptel | rcbits;
note_hpte_modification(kvm, &rev[i]);
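The added call records, while the R/C bits are harvested, that a changed (C=1) mapping of size psize was torn down, so the dirty-log scan can later tell how many base pages that change covers. A rough, self-contained sketch of packing a "changed" flag plus the largest page order into the rmap word follows; the bit layout and names are illustrative, not the kernel's KVMPPC_RMAP_* definitions.

#define RMAP_CHANGED		(1ul << 0)
#define RMAP_CHG_SHIFT		1
#define RMAP_CHG_ORDER_MASK	(0x3ful << RMAP_CHG_SHIFT)

/* Remember that a dirty mapping of the given size was removed, keeping
 * the largest order seen so far packed next to the CHANGED flag. */
static void record_rmap_change(unsigned long *rmapp, unsigned long psize)
{
	unsigned long order = __builtin_ctzl(psize);	/* log2(psize) */
	unsigned long prev = (*rmapp & RMAP_CHG_ORDER_MASK) >> RMAP_CHG_SHIFT;

	*rmapp |= RMAP_CHANGED;
	if (order > prev)
		*rmapp = (*rmapp & ~RMAP_CHG_ORDER_MASK) |
			 (order << RMAP_CHG_SHIFT);
}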
@@ -927,8 +930,12 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
retry:
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_CHANGED) {
- *rmapp &= ~KVMPPC_RMAP_CHANGED;
+ long change_order = (*rmapp & KVMPPC_RMAP_CHG_ORDER)
+ >> KVMPPC_RMAP_CHG_SHIFT;
+ *rmapp &= ~(KVMPPC_RMAP_CHANGED | KVMPPC_RMAP_CHG_ORDER);
npages_dirty = 1;
+ if (change_order > PAGE_SHIFT)
+ npages_dirty = 1ul << (change_order - PAGE_SHIFT);
}
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
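On the dirty-log side, the stored change order is turned back into a base-page count: an order-24 (16 MB) change with 4 KB (shift-12) base pages marks 1 << (24 - 12) = 4096 pages dirty, while anything at or below base-page size still counts as one page. A small sketch of just that arithmetic, with BASE_PAGE_SHIFT standing in for PAGE_SHIFT:

#define BASE_PAGE_SHIFT	12	/* stand-in for PAGE_SHIFT */

/* Convert a recorded change order into the number of dirty base pages. */
static unsigned long npages_dirty_from_order(long change_order)
{
	unsigned long npages = 1;

	if (change_order > BASE_PAGE_SHIFT)
		npages = 1ul << (change_order - BASE_PAGE_SHIFT);
	return npages;
}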