path: root/kernel/mm/mremap.c
author     José Pekkarinen <jose.pekkarinen@nokia.com>   2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>   2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/mm/mremap.c
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources are taken from kernel.org, and the rt patch from the rt wiki download page.

During the rebase, the following patch collided: Force tick interrupt and get rid of softirq magic (I70131fb85). The colliding change has been dropped because its logic is already present in the new source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/mm/mremap.c')
-rw-r--r--   kernel/mm/mremap.c   63
1 file changed, 32 insertions(+), 31 deletions(-)
diff --git a/kernel/mm/mremap.c b/kernel/mm/mremap.c
index 034e2d360..c25bc6268 100644
--- a/kernel/mm/mremap.c
+++ b/kernel/mm/mremap.c
@@ -22,6 +22,7 @@
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>
#include <linux/uaccess.h>
+#include <linux/mm-arch-hooks.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -275,6 +276,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
need_rmap_locks);
if (moved_len < old_len) {
+ err = -ENOMEM;
+ } else if (vma->vm_ops && vma->vm_ops->mremap) {
+ err = vma->vm_ops->mremap(new_vma);
+ }
+
+ if (unlikely(err)) {
/*
* On error, move entries back from new area to old,
* which will succeed since page tables still there,
@@ -285,14 +292,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,
vma = new_vma;
old_len = new_len;
old_addr = new_addr;
- new_addr = -ENOMEM;
- } else if (vma->vm_file && vma->vm_file->f_op->mremap) {
- err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
- if (err < 0) {
- move_page_tables(new_vma, new_addr, vma, old_addr,
- moved_len, true);
- return err;
- }
+ new_addr = err;
+ } else {
+ arch_remap(mm, old_addr, old_addr + old_len,
+ new_addr, new_addr + new_len);
}
/* Conceal VM_ACCOUNT so old reservation is not undone */
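
Read together, the two hunks above restructure move_vma()'s error handling: a short page-table move and a failing vma->vm_ops->mremap() callback now both set err, one if (unlikely(err)) block rolls the move back and reports the error through new_addr, and the success path gains the new arch_remap() hook. A rough userspace sketch of that control flow, using illustrative stand-ins (do_callback, roll_back and notify_arch are not kernel functions):

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins: the real code calls vma->vm_ops->mremap(),
 * move_page_tables() in reverse, and arch_remap() respectively. */
static int do_callback(void)  { return 0; }
static void roll_back(void)   { puts("rolled back"); }
static void notify_arch(void) { puts("arch notified"); }

/* Mirrors the reworked tail of move_vma(): both failure sources feed one
 * error path; the architecture hook runs only on full success. */
static long sketch_move_tail(long moved_len, long old_len)
{
    long err = 0;

    if (moved_len < old_len)
        err = -ENOMEM;          /* partial page-table move */
    else
        err = do_callback();    /* driver veto via ->mremap() */

    if (err) {
        roll_back();            /* move entries back to the old range */
        return err;             /* reported through new_addr */
    }
    notify_arch();              /* arch_remap() on success */
    return 0;
}

int main(void)
{
    printf("%ld\n", sketch_move_tail(4096, 8192));  /* fails: -ENOMEM */
    printf("%ld\n", sketch_move_tail(8192, 8192));  /* succeeds: 0 */
    return 0;
}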
@@ -343,6 +346,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = find_vma(mm, addr);
+ unsigned long pgoff;
if (!vma || vma->vm_start > addr)
return ERR_PTR(-EFAULT);
@@ -354,17 +358,17 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
if (old_len > vma->vm_end - addr)
return ERR_PTR(-EFAULT);
+ if (new_len == old_len)
+ return vma;
+
/* Need to be careful about a growing mapping */
- if (new_len > old_len) {
- unsigned long pgoff;
-
- if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
- return ERR_PTR(-EFAULT);
- pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
- pgoff += vma->vm_pgoff;
- if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
- return ERR_PTR(-EINVAL);
- }
+ pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+ pgoff += vma->vm_pgoff;
+ if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
+ return ERR_PTR(-EINVAL);
+
+ if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
+ return ERR_PTR(-EFAULT);
if (vma->vm_flags & VM_LOCKED) {
unsigned long locked, lock_limit;
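
In the reordered vma_to_resize() checks above, pgoff + (new_len >> PAGE_SHIFT) < pgoff is the usual unsigned-wraparound test: if the page offset of the resized mapping's end no longer fits in an unsigned long, the sum wraps below pgoff. A small standalone demonstration, assuming 4 KiB pages (PAGE_SHIFT of 12):

#include <assert.h>
#include <limits.h>

#define PAGE_SHIFT 12   /* 4 KiB pages assumed for this demo */

/* Returns 1 when growing a mapping that starts at page offset pgoff to
 * new_len bytes would overflow the page-offset arithmetic, mirroring the
 * check in vma_to_resize(). */
static int pgoff_would_wrap(unsigned long pgoff, unsigned long new_len)
{
    return pgoff + (new_len >> PAGE_SHIFT) < pgoff;
}

int main(void)
{
    assert(!pgoff_would_wrap(100, 1UL << 20));           /* ordinary resize */
    assert(pgoff_would_wrap(ULONG_MAX - 1, 1UL << 20));  /* wraps: -EINVAL */
    return 0;
}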
@@ -397,19 +401,14 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
unsigned long charged = 0;
unsigned long map_flags;
- if (new_addr & ~PAGE_MASK)
+ if (offset_in_page(new_addr))
goto out;
if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
goto out;
- /* Check if the location we're moving into overlaps the
- * old location at all, and fail if it does.
- */
- if ((new_addr <= addr) && (new_addr+new_len) > addr)
- goto out;
-
- if ((addr <= new_addr) && (addr+old_len) > new_addr)
+ /* Ensure the old/new locations do not overlap */
+ if (addr + old_len > new_addr && new_addr + new_len > addr)
goto out;
ret = do_munmap(mm, new_addr, new_len);
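
The consolidated check in this hunk is the standard half-open interval overlap test: [addr, addr + old_len) and [new_addr, new_addr + new_len) intersect exactly when each range starts before the other one ends, which subsumes both of the removed one-sided cases for non-zero lengths. A quick standalone equivalence check:

#include <assert.h>

/* The two removed one-sided checks. */
static int overlaps_old(unsigned long addr, unsigned long old_len,
                        unsigned long new_addr, unsigned long new_len)
{
    if (new_addr <= addr && new_addr + new_len > addr)
        return 1;
    if (addr <= new_addr && addr + old_len > new_addr)
        return 1;
    return 0;
}

/* The single symmetric test that replaces them. */
static int overlaps_new(unsigned long addr, unsigned long old_len,
                        unsigned long new_addr, unsigned long new_len)
{
    return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
    /* Exhaustive comparison over small ranges with non-zero lengths. */
    for (unsigned long a = 0; a < 16; a++)
        for (unsigned long ol = 1; ol < 8; ol++)
            for (unsigned long n = 0; n < 16; n++)
                for (unsigned long nl = 1; nl < 8; nl++)
                    assert(overlaps_old(a, ol, n, nl) ==
                           overlaps_new(a, ol, n, nl));
    return 0;
}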
@@ -436,11 +435,11 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
((addr - vma->vm_start) >> PAGE_SHIFT),
map_flags);
- if (ret & ~PAGE_MASK)
+ if (offset_in_page(ret))
goto out1;
ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
- if (!(ret & ~PAGE_MASK))
+ if (!(offset_in_page(ret)))
goto out;
out1:
vm_unacct_memory(charged);
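
The offset_in_page() conversions here and in the remaining hunks are mechanical: the macro (defined in include/linux/mm.h, to the best of my knowledge, as ((unsigned long)(p) & ~PAGE_MASK)) expands to exactly the open-coded expression it replaces, so only readability changes. A standalone sanity check under that assumption:

#include <assert.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
/* Assumed to match the kernel's definition in include/linux/mm.h. */
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
    for (unsigned long x = 0; x < 3 * PAGE_SIZE; x += 7)
        assert(offset_in_page(x) == (x & ~PAGE_MASK));
    assert(offset_in_page(PAGE_SIZE) == 0);      /* page-aligned address */
    assert(offset_in_page(PAGE_SIZE + 1) == 1);  /* one byte into the page */
    return 0;
}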
@@ -485,7 +484,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
return ret;
- if (addr & ~PAGE_MASK)
+ if (offset_in_page(addr))
return ret;
old_len = PAGE_ALIGN(old_len);
@@ -567,7 +566,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
vma->vm_pgoff +
((addr - vma->vm_start) >> PAGE_SHIFT),
map_flags);
- if (new_addr & ~PAGE_MASK) {
+ if (offset_in_page(new_addr)) {
ret = new_addr;
goto out;
}
@@ -575,8 +574,10 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
}
out:
- if (ret & ~PAGE_MASK)
+ if (offset_in_page(ret)) {
vm_unacct_memory(charged);
+ locked = 0;
+ }
up_write(&current->mm->mmap_sem);
if (locked && new_len > old_len)
mm_populate(new_addr + old_len, new_len - old_len);
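
The final hunk clears locked on the error path so that the mm_populate() call after the mmap_sem release is skipped when move_vma() failed; otherwise a failed remap of a VM_LOCKED mapping would try to populate a grown range that was never created. A minimal userspace illustration of the guard (populate() and finish_mremap() are stand-ins, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for mm_populate(): just report what would be faulted in. */
static void populate(unsigned long start, unsigned long len)
{
    printf("populate %#lx..%#lx\n", start, start + len);
}

/* Mirrors the tail of the mremap syscall: an error return drops the
 * pending populate of the newly grown, mlock()ed range. */
static void finish_mremap(long ret, bool locked, unsigned long new_addr,
                          unsigned long old_len, unsigned long new_len)
{
    if (ret < 0)        /* error path (offset_in_page(ret) in the kernel) */
        locked = false;

    /* ... mmap_sem is released here in the real code ... */

    if (locked && new_len > old_len)
        populate(new_addr + old_len, new_len - old_len);
}

int main(void)
{
    finish_mremap(0x20000, true, 0x20000, 4096, 8192);  /* grew: populate tail */
    finish_mremap(-12, true, 0, 4096, 8192);            /* failed: nothing done */
    return 0;
}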