Diffstat (limited to 'kernel/arch/s390/mm')
 -rw-r--r--  kernel/arch/s390/mm/init.c     |  3
 -rw-r--r--  kernel/arch/s390/mm/mmap.c     |  6
 -rw-r--r--  kernel/arch/s390/mm/pgtable.c  | 89
 3 files changed, 35 insertions, 63 deletions
diff --git a/kernel/arch/s390/mm/init.c b/kernel/arch/s390/mm/init.c
index c722400c7..feff9caf8 100644
--- a/kernel/arch/s390/mm/init.c
+++ b/kernel/arch/s390/mm/init.c
@@ -89,7 +89,8 @@ void __init paging_init(void)
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
- S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ S390_lowcore.kernel_asce = init_mm.context.asce;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
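
Note: the hunk above computes the kernel ASCE once, stores it in init_mm.context.asce, and then copies that cached value into S390_lowcore.kernel_asce, so the "page-aligned table origin OR'd with type/length bits" composition happens in exactly one place. A minimal standalone sketch of that composition follows; the DEMO_* constants and the sample origin are placeholders invented for illustration, not the real s390 definitions.

#include <stdio.h>
#include <stdint.h>

/* Placeholder values invented for this demo; not the s390 definitions. */
#define DEMO_PAGE_MASK       (~0xfffULL)   /* assumed 4 KiB page mask */
#define DEMO_ASCE_TYPE_BITS  0x0cULL       /* stand-in for _ASCE_TYPE_REGION3 */
#define DEMO_ASCE_TABLE_LEN  0x03ULL       /* stand-in for _ASCE_TABLE_LENGTH */

int main(void)
{
	/* Stand-in for __pa(init_mm.pgd): a page-aligned physical address. */
	uint64_t pgd_phys = 0x12345000ULL;

	/* Same "origin | mode bits" shape as the patched paging_init(). */
	uint64_t asce = (pgd_phys & DEMO_PAGE_MASK)
		      | DEMO_ASCE_TYPE_BITS | DEMO_ASCE_TABLE_LEN;

	printf("asce = 0x%016llx\n", (unsigned long long) asce);
	return 0;
}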
diff --git a/kernel/arch/s390/mm/mmap.c b/kernel/arch/s390/mm/mmap.c
index ea01477b4..f2b6b1d9c 100644
--- a/kernel/arch/s390/mm/mmap.c
+++ b/kernel/arch/s390/mm/mmap.c
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
if (!(flags & MAP_FIXED))
addr = 0;
if ((addr + len) >= TASK_SIZE)
- return crst_table_upgrade(current->mm, 1UL << 53);
+ return crst_table_upgrade(current->mm);
return 0;
}
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, 1UL << 53);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, 1UL << 53);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area_topdown(filp, addr, len,
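
Both mmap.c call sites keep the same upgrade-and-retry shape; only the now-redundant limit argument to crst_table_upgrade() goes away. Condensed from the second hunk above (declarations and intermediate checks elided), the pattern reads roughly like this after the change:

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	/* ... */
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}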
diff --git a/kernel/arch/s390/mm/pgtable.c b/kernel/arch/s390/mm/pgtable.c
index 54ef3bc01..8345ae1f1 100644
--- a/kernel/arch/s390/mm/pgtable.c
+++ b/kernel/arch/s390/mm/pgtable.c
@@ -49,81 +49,52 @@ static void __crst_table_upgrade(void *arg)
__tlb_flush_local();
}
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+int crst_table_upgrade(struct mm_struct *mm)
{
unsigned long *table, *pgd;
- unsigned long entry;
- int flush;
- BUG_ON(limit > (1UL << 53));
- flush = 0;
-repeat:
+ /* upgrade should only happen from 3 to 4 levels */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
+
spin_lock_bh(&mm->page_table_lock);
- if (mm->context.asce_limit < limit) {
- pgd = (unsigned long *) mm->pgd;
- if (mm->context.asce_limit <= (1UL << 31)) {
- entry = _REGION3_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- } else {
- entry = _REGION2_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 53;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION2;
- }
- crst_table_init(table, entry);
- pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
- mm->pgd = (pgd_t *) table;
- mm->task_size = mm->context.asce_limit;
- table = NULL;
- flush = 1;
- }
+ pgd = (unsigned long *) mm->pgd;
+ crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+ mm->pgd = (pgd_t *) table;
+ mm->context.asce_limit = 1UL << 53;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ mm->task_size = mm->context.asce_limit;
spin_unlock_bh(&mm->page_table_lock);
- if (table)
- crst_table_free(mm, table);
- if (mm->context.asce_limit < limit)
- goto repeat;
- if (flush)
- on_each_cpu(__crst_table_upgrade, mm, 0);
+
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+void crst_table_downgrade(struct mm_struct *mm)
{
pgd_t *pgd;
+ /* downgrade should only happen from 3 to 2 levels (compat only) */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
if (current->active_mm == mm) {
clear_user_asce();
__tlb_flush_mm(mm);
}
- while (mm->context.asce_limit > limit) {
- pgd = mm->pgd;
- switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
- case _REGION_ENTRY_TYPE_R2:
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- break;
- case _REGION_ENTRY_TYPE_R3:
- mm->context.asce_limit = 1UL << 31;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_SEGMENT;
- break;
- default:
- BUG();
- }
- mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
- mm->task_size = mm->context.asce_limit;
- crst_table_free(mm, (unsigned long *) pgd);
- }
+
+ pgd = mm->pgd;
+ mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+ mm->context.asce_limit = 1UL << 31;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+
if (current->active_mm == mm)
set_user_asce(mm);
}
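
Because this hunk rewrites both functions almost entirely, here they are as they read with the patch applied, reassembled from the '+' and context lines above (blank lines and indentation approximated):

int crst_table_upgrade(struct mm_struct *mm)
{
	unsigned long *table, *pgd;

	/* upgrade should only happen from 3 to 4 levels */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;

	spin_lock_bh(&mm->page_table_lock);
	pgd = (unsigned long *) mm->pgd;
	crst_table_init(table, _REGION2_ENTRY_EMPTY);
	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
	mm->pgd = (pgd_t *) table;
	mm->context.asce_limit = 1UL << 53;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
	mm->task_size = mm->context.asce_limit;
	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = 1UL << 31;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	mm->task_size = mm->context.asce_limit;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}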
@@ -195,7 +166,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
static void gmap_flush_tlb(struct gmap *gmap)
{
if (MACHINE_HAS_IDTE)
- __tlb_flush_asce(gmap->mm, gmap->asce);
+ __tlb_flush_idte(gmap->asce);
else
__tlb_flush_global();
}
@@ -234,7 +205,7 @@ void gmap_free(struct gmap *gmap)
/* Flush tlb. */
if (MACHINE_HAS_IDTE)
- __tlb_flush_asce(gmap->mm, gmap->asce);
+ __tlb_flush_idte(gmap->asce);
else
__tlb_flush_global();
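
Both gmap hunks make the same substitution: the mm-based __tlb_flush_asce(gmap->mm, gmap->asce) call becomes __tlb_flush_idte(gmap->asce), which needs only the guest ASCE. Reassembled from the first of the two hunks, the flush helper now reads:

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}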