Diffstat (limited to 'kernel/arch/avr32/mm')
-rw-r--r--  kernel/arch/avr32/mm/Makefile         6
-rw-r--r--  kernel/arch/avr32/mm/cache.c        163
-rw-r--r--  kernel/arch/avr32/mm/clear_page.S    25
-rw-r--r--  kernel/arch/avr32/mm/copy_page.S     28
-rw-r--r--  kernel/arch/avr32/mm/dma-coherent.c 152
-rw-r--r--  kernel/arch/avr32/mm/fault.c        268
-rw-r--r--  kernel/arch/avr32/mm/init.c         125
-rw-r--r--  kernel/arch/avr32/mm/ioremap.c       93
-rw-r--r--  kernel/arch/avr32/mm/tlb.c          375
9 files changed, 1235 insertions, 0 deletions
diff --git a/kernel/arch/avr32/mm/Makefile b/kernel/arch/avr32/mm/Makefile
new file mode 100644
index 000000000..0066491f9
--- /dev/null
+++ b/kernel/arch/avr32/mm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Linux/AVR32 kernel.
+#
+
+obj-y += init.o clear_page.o copy_page.o dma-coherent.o
+obj-y += ioremap.o cache.o fault.o tlb.o
diff --git a/kernel/arch/avr32/mm/cache.c b/kernel/arch/avr32/mm/cache.c
new file mode 100644
index 000000000..85d635cd7
--- /dev/null
+++ b/kernel/arch/avr32/mm/cache.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/highmem.h>
+#include <linux/unistd.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/syscalls.h>
+
+/*
+ * If you attempt to flush anything more than this, you need superuser
+ * privileges. The value is completely arbitrary.
+ */
+#define CACHEFLUSH_MAX_LEN 1024
+
+void invalidate_dcache_region(void *start, size_t size)
+{
+ unsigned long v, begin, end, linesz, mask;
+
+ linesz = boot_cpu_data.dcache.linesz;
+ mask = linesz - 1;
+
+ /* when first and/or last cachelines are shared, flush them
+ * instead of invalidating ... never discard valid data!
+ */
+ begin = (unsigned long)start;
+ end = begin + size;
+
+ if (begin & mask) {
+ flush_dcache_line(start);
+ begin += linesz;
+ }
+ if (end & mask) {
+ flush_dcache_line((void *)end);
+ end &= ~mask;
+ }
+
+ /* remaining cachelines only need invalidation */
+ for (v = begin; v < end; v += linesz)
+ invalidate_dcache_line((void *)v);
+ flush_write_buffer();
+}
+
+void clean_dcache_region(void *start, size_t size)
+{
+ unsigned long v, begin, end, linesz;
+
+ linesz = boot_cpu_data.dcache.linesz;
+ begin = (unsigned long)start & ~(linesz - 1);
+ end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);
+
+ for (v = begin; v < end; v += linesz)
+ clean_dcache_line((void *)v);
+ flush_write_buffer();
+}
+
+void flush_dcache_region(void *start, size_t size)
+{
+ unsigned long v, begin, end, linesz;
+
+ linesz = boot_cpu_data.dcache.linesz;
+ begin = (unsigned long)start & ~(linesz - 1);
+ end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);
+
+ for (v = begin; v < end; v += linesz)
+ flush_dcache_line((void *)v);
+ flush_write_buffer();
+}
+
+void invalidate_icache_region(void *start, size_t size)
+{
+ unsigned long v, begin, end, linesz;
+
+ linesz = boot_cpu_data.icache.linesz;
+ begin = (unsigned long)start & ~(linesz - 1);
+ end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);
+
+ for (v = begin; v < end; v += linesz)
+ invalidate_icache_line((void *)v);
+}
+
+static inline void __flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long v, linesz;
+
+ linesz = boot_cpu_data.dcache.linesz;
+ for (v = start; v < end; v += linesz) {
+ clean_dcache_line((void *)v);
+ invalidate_icache_line((void *)v);
+ }
+
+ flush_write_buffer();
+}
+
+/*
+ * This one is called after a module has been loaded.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long linesz;
+
+ linesz = boot_cpu_data.dcache.linesz;
+ __flush_icache_range(start & ~(linesz - 1),
+ (end + linesz - 1) & ~(linesz - 1));
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+/*
+ * This one is called from __do_fault() and do_swap_page().
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ if (vma->vm_flags & VM_EXEC) {
+ void *v = page_address(page);
+ __flush_icache_range((unsigned long)v, (unsigned long)v + PAGE_SIZE);
+ }
+}
+
+asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
+{
+ int ret;
+
+ if (len > CACHEFLUSH_MAX_LEN) {
+ ret = -EPERM;
+ if (!capable(CAP_SYS_ADMIN))
+ goto out;
+ }
+
+ ret = -EFAULT;
+ if (!access_ok(VERIFY_WRITE, addr, len))
+ goto out;
+
+ switch (operation) {
+ case CACHE_IFLUSH:
+ flush_icache_range((unsigned long)addr,
+ (unsigned long)addr + len);
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ return ret;
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_range((unsigned long)dst,
+ (unsigned long)dst + len);
+}
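
For illustration, a minimal userspace sketch of driving sys_cacheflush() above, e.g. after generating code at runtime. It assumes __NR_cacheflush and CACHE_IFLUSH are exposed by the AVR32 toolchain headers; it is not part of the patch.

#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>       /* CACHE_IFLUSH (assumed header) */

/* Make freshly written instructions visible to the icache. Requests
 * longer than CACHEFLUSH_MAX_LEN (1024 bytes) need CAP_SYS_ADMIN. */
static int flush_code(void *addr, size_t len)
{
        return syscall(__NR_cacheflush, CACHE_IFLUSH, addr, len);
}
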
diff --git a/kernel/arch/avr32/mm/clear_page.S b/kernel/arch/avr32/mm/clear_page.S
new file mode 100644
index 000000000..5d70dca00
--- /dev/null
+++ b/kernel/arch/avr32/mm/clear_page.S
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+/*
+ * clear_page
+ * r12: P1 address (to)
+ */
+ .text
+ .global clear_page
+clear_page:
+ sub r9, r12, -PAGE_SIZE
+ mov r10, 0
+ mov r11, 0
+0: st.d r12++, r10
+ cp r12, r9
+ brne 0b
+ mov pc, lr
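
For readers who don't speak AVR32 assembly, the loop above is equivalent to the following C sketch (the real routine clears 64 bits per iteration with st.d):

/* C rendering of clear_page above; illustration only. */
void clear_page_c(void *to)
{
        unsigned long *p = to;
        unsigned long *end = (unsigned long *)((char *)to + PAGE_SIZE);

        while (p < end)
                *p++ = 0;
}
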
diff --git a/kernel/arch/avr32/mm/copy_page.S b/kernel/arch/avr32/mm/copy_page.S
new file mode 100644
index 000000000..c2b375294
--- /dev/null
+++ b/kernel/arch/avr32/mm/copy_page.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+/*
+ * copy_page
+ *
+ * r12 to (P1 address)
+ * r11 from (P1 address)
+ * r8-r10 scratch
+ */
+ .text
+ .global copy_page
+copy_page:
+ sub r10, r11, -(1 << PAGE_SHIFT)
+ /* pref r11[0] */
+1: /* pref r11[8] */
+ ld.d r8, r11++
+ st.d r12++, r8
+ cp r11, r10
+ brlo 1b
+ mov pc, lr
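
Likewise, a C sketch of the copy loop; note that the pref (prefetch) instructions appear above only as comments:

/* C rendering of copy_page above; illustration only. The assembly
 * moves 64 bits per iteration with ld.d/st.d. */
void copy_page_c(void *to, const void *from)
{
        const unsigned long *s = from;
        const unsigned long *end =
                (const unsigned long *)((const char *)from + PAGE_SIZE);
        unsigned long *d = to;

        while (s < end)
                *d++ = *s++;
}
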
diff --git a/kernel/arch/avr32/mm/dma-coherent.c b/kernel/arch/avr32/mm/dma-coherent.c
new file mode 100644
index 000000000..50cdb5b10
--- /dev/null
+++ b/kernel/arch/avr32/mm/dma-coherent.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+
+#include <asm/addrspace.h>
+#include <asm/cacheflush.h>
+
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
+{
+ /*
+ * No need to sync an uncached area
+ */
+ if (PXSEG(vaddr) == P2SEG)
+ return;
+
+ switch (direction) {
+ case DMA_FROM_DEVICE: /* invalidate only */
+ invalidate_dcache_region(vaddr, size);
+ break;
+ case DMA_TO_DEVICE: /* writeback only */
+ clean_dcache_region(vaddr, size);
+ break;
+ case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+ flush_dcache_region(vaddr, size);
+ break;
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL(dma_cache_sync);
+
+static struct page *__dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
+{
+ struct page *page, *free, *end;
+ int order;
+
+ /* Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+ * handle them. The real problem is that this flag probably
+ * should be 0 on AVR32 as it is not supported on this
+ * platform--see CONFIG_HUGETLB_PAGE. */
+ gfp &= ~(__GFP_COMP);
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ page = alloc_pages(gfp, order);
+ if (!page)
+ return NULL;
+ split_page(page, order);
+
+ /*
+ * When accessing physical memory with valid cache data, we
+ * get a cache hit even if the virtual memory region is marked
+ * as uncached.
+ *
+ * Since the memory is newly allocated, there is no point in
+ * doing a writeback. If the previous owner cares, he should
+ * have flushed the cache before releasing the memory.
+ */
+ invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);
+
+ *handle = page_to_bus(page);
+ free = page + (size >> PAGE_SHIFT);
+ end = page + (1 << order);
+
+ /*
+ * Free any unused pages
+ */
+ while (free < end) {
+ __free_page(free);
+ free++;
+ }
+
+ return page;
+}
+
+static void __dma_free(struct device *dev, size_t size,
+ struct page *page, dma_addr_t handle)
+{
+ struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
+
+ while (page < end)
+ __free_page(page++);
+}
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
+{
+ struct page *page;
+ void *ret = NULL;
+
+ page = __dma_alloc(dev, size, handle, gfp);
+ if (page)
+ ret = phys_to_uncached(page_to_phys(page));
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle)
+{
+ void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
+ struct page *page;
+
+ pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
+ cpu_addr, (unsigned long)handle, (unsigned)size);
+ BUG_ON(!virt_addr_valid(addr));
+ page = virt_to_page(addr);
+ __dma_free(dev, size, page, handle);
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
+{
+ struct page *page;
+ dma_addr_t phys;
+
+ page = __dma_alloc(dev, size, handle, gfp);
+ if (!page)
+ return NULL;
+
+ phys = page_to_phys(page);
+ *handle = phys;
+
+ /* Now, map the page into P3 with write-combining turned on */
+ return __ioremap(phys, size, _PAGE_BUFFER);
+}
+EXPORT_SYMBOL(dma_alloc_writecombine);
+
+void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle)
+{
+ struct page *page;
+
+ iounmap(cpu_addr);
+
+ page = phys_to_page(handle);
+ __dma_free(dev, size, page, handle);
+}
+EXPORT_SYMBOL(dma_free_writecombine);
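
A hypothetical driver would use the allocator above along these lines; the descriptor-ring size and the register programming are invented for illustration:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#define EXAMPLE_RING_BYTES 4096         /* made-up ring size */

static int example_ring_setup(struct device *dev, void **cpu, dma_addr_t *dma)
{
        /* The CPU gets an uncached (P2) pointer, the device a bus address. */
        *cpu = dma_alloc_coherent(dev, EXAMPLE_RING_BYTES, dma, GFP_KERNEL);
        if (!*cpu)
                return -ENOMEM;
        /* Program *dma into the device's ring-base register here. */
        return 0;
}

static void example_ring_teardown(struct device *dev, void *cpu, dma_addr_t dma)
{
        dma_free_coherent(dev, EXAMPLE_RING_BYTES, cpu, dma);
}
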
diff --git a/kernel/arch/avr32/mm/fault.c b/kernel/arch/avr32/mm/fault.c
new file mode 100644
index 000000000..c03533937
--- /dev/null
+++ b/kernel/arch/avr32/mm/fault.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * Based on linux/arch/sh/mm/fault.c:
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+
+#include <asm/mmu_context.h>
+#include <asm/sysreg.h>
+#include <asm/tlb.h>
+
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+ int ret = 0;
+
+ if (!user_mode(regs)) {
+ if (kprobe_running() && kprobe_fault_handler(regs, trap))
+ ret = 1;
+ }
+
+ return ret;
+}
+#else
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+ return 0;
+}
+#endif
+
+int exception_trace = 1;
+
+/*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+ *
+ * ecr is the Exception Cause Register. Possible values are:
+ * 6: Protection fault (instruction access)
+ * 15: Protection fault (read access)
+ * 16: Protection fault (write access)
+ * 20: Page not found (instruction access)
+ * 24: Page not found (read access)
+ * 28: Page not found (write access)
+ */
+asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ const struct exception_table_entry *fixup;
+ unsigned long address;
+ unsigned long page;
+ long signr;
+ int code;
+ int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ if (notify_page_fault(regs, ecr))
+ return;
+
+ address = sysreg_read(TLBEAR);
+
+ tsk = current;
+ mm = tsk->mm;
+
+ signr = SIGSEGV;
+ code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user context, we must
+ * not take the fault...
+ */
+ if (faulthandler_disabled() || !mm || regs->sr & SYSREG_BIT(GM))
+ goto no_context;
+
+ local_irq_enable();
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+retry:
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so we
+ * can handle it...
+ */
+good_area:
+ code = SEGV_ACCERR;
+
+ switch (ecr) {
+ case ECR_PROTECTION_X:
+ case ECR_TLB_MISS_X:
+ if (!(vma->vm_flags & VM_EXEC))
+ goto bad_area;
+ break;
+ case ECR_PROTECTION_R:
+ case ECR_TLB_MISS_R:
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ goto bad_area;
+ break;
+ case ECR_PROTECTION_W:
+ case ECR_TLB_MISS_W:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
+ break;
+ default:
+ panic("Unhandled case %lu in do_page_fault!", ecr);
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the
+ * fault.
+ */
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would have
+ * already released it in __lock_page_or_retry() in
+ * mm/filemap.c.
+ */
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory
+ * map. Fix it, but check if it's kernel or user first...
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+ is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
+ tsk->comm, tsk->pid, address, regs->pc,
+ regs->sp, ecr);
+ _exception(SIGSEGV, regs, code, address);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return;
+ }
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have
+ * to terminate things with extreme prejudice.
+ */
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT
+ "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT
+ "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n", address);
+
+ page = sysreg_read(PTBR);
+ printk(KERN_ALERT "ptbr = %08lx", page);
+ if (address >= TASK_SIZE)
+ page = (unsigned long)swapper_pg_dir;
+ if (page) {
+ page = ((unsigned long *)page)[address >> 22];
+ printk(" pgd = %08lx", page);
+ if (page & _PAGE_PRESENT) {
+ page &= PAGE_MASK;
+ address &= 0x003ff000;
+ page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
+ printk(" pte = %08lx", page);
+ }
+ }
+ printk("\n");
+ die("Kernel access of bad area", regs, signr);
+ return;
+
+ /*
+ * We ran out of memory, or some other thing happened to us
+ * that made us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ signr = SIGBUS;
+ code = BUS_ADRERR;
+ if (!user_mode(regs))
+ goto no_context;
+
+ if (exception_trace)
+ printk("%s%s[%d]: bus error at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+ is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
+ tsk->comm, tsk->pid, address, regs->pc,
+ regs->sp, ecr);
+
+ _exception(SIGBUS, regs, BUS_ADRERR, address);
+}
+
+asmlinkage void do_bus_error(unsigned long addr, int write_access,
+ struct pt_regs *regs)
+{
+ printk(KERN_ALERT
+ "Bus error at physical address 0x%08lx (%s access)\n",
+ addr, write_access ? "write" : "read");
+ printk(KERN_INFO "DTLB dump:\n");
+ dump_dtlb();
+ die("Bus Error", regs, SIGKILL);
+}
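
The oops path above walks the two-level page table by hand (the top 10 address bits index the pgd, the next 10 the pte). The same walk, factored out as a sketch:

/* Illustration of the manual walk in the oops path above: returns the
 * raw pte word for addr, or 0 if the pgd slot is not present. */
static unsigned long walk_pgtable(unsigned long ptbr, unsigned long addr)
{
        unsigned long pgd = ((unsigned long *)ptbr)[addr >> 22];

        if (!(pgd & _PAGE_PRESENT))
                return 0;
        return ((unsigned long *)__va(pgd & PAGE_MASK))
                [(addr & 0x003ff000) >> PAGE_SHIFT];
}
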
diff --git a/kernel/arch/avr32/mm/init.c b/kernel/arch/avr32/mm/init.c
new file mode 100644
index 000000000..def5391d9
--- /dev/null
+++ b/kernel/arch/avr32/mm/init.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/pagemap.h>
+#include <linux/nodemask.h>
+
+#include <asm/page.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
+
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * Cache of MMU context last used.
+ */
+unsigned long mmu_context_cache = NO_CONTEXT;
+
+/*
+ * paging_init() sets up the page tables
+ *
+ * This routine also unmaps the page at virtual kernel address 0, so
+ * that we can trap those pesky NULL-reference errors in the kernel.
+ */
+void __init paging_init(void)
+{
+ extern unsigned long _evba;
+ void *zero_page;
+ int nid;
+
+ /*
+ * Make sure we can handle exceptions before enabling
+ * paging. Not that we should ever _get_ any exceptions this
+ * early, but you never know...
+ */
+ printk("Exception vectors start at %p\n", &_evba);
+ sysreg_write(EVBA, (unsigned long)&_evba);
+
+ /*
+ * Since we are ready to handle exceptions now, we should let
+ * the CPU generate them...
+ */
+ __asm__ __volatile__ ("csrf %0" : : "i"(SR_EM_BIT));
+
+ /*
+ * Allocate the zero page. The allocator will panic if it
+ * can't satisfy the request, so no need to check.
+ */
+ zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0),
+ PAGE_SIZE);
+
+ sysreg_write(PTBR, (unsigned long)swapper_pg_dir);
+ enable_mmu();
+ printk ("CPU: Paging enabled\n");
+
+ for_each_online_node(nid) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+ unsigned long zones_size[MAX_NR_ZONES];
+ unsigned long low, start_pfn;
+
+ start_pfn = pgdat->bdata->node_min_pfn;
+ low = pgdat->bdata->node_low_pfn;
+
+ memset(zones_size, 0, sizeof(zones_size));
+ zones_size[ZONE_NORMAL] = low - start_pfn;
+
+ printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
+ nid, start_pfn, low);
+
+ free_area_init_node(nid, zones_size, start_pfn, NULL);
+
+ printk("Node %u: mem_map starts at %p\n",
+ pgdat->node_id, pgdat->node_mem_map);
+ }
+
+ mem_map = NODE_DATA(0)->node_mem_map;
+
+ empty_zero_page = virt_to_page(zero_page);
+ flush_dcache_page(empty_zero_page);
+}
+
+void __init mem_init(void)
+{
+ pg_data_t *pgdat;
+
+ high_memory = NULL;
+ for_each_online_pgdat(pgdat)
+ high_memory = max_t(void *, high_memory,
+ __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
+
+ set_max_mapnr(MAP_NR(high_memory));
+ free_all_bootmem();
+ mem_init_print_info(NULL);
+}
+
+void free_initmem(void)
+{
+ free_initmem_default(-1);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
+}
+#endif
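
Note that paging_init() loads PTBR with swapper_pg_dir before calling enable_mmu(), so the page table the hardware walks is always whatever PTBR holds; a one-line sketch using the same sysreg accessor as the fault handler:

/* Illustration: the MMU walks the PGD that PTBR points at;
 * paging_init() sets this to swapper_pg_dir before enable_mmu(). */
static inline pgd_t *hw_pgd(void)
{
        return (pgd_t *)sysreg_read(PTBR);
}
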
diff --git a/kernel/arch/avr32/mm/ioremap.c b/kernel/arch/avr32/mm/ioremap.c
new file mode 100644
index 000000000..7def0d84c
--- /dev/null
+++ b/kernel/arch/avr32/mm/ioremap.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <asm/addrspace.h>
+
+/*
+ * Re-map an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access physical
+ * memory directly.
+ */
+void __iomem *__ioremap(unsigned long phys_addr, size_t size,
+ unsigned long flags)
+{
+ unsigned long addr;
+ struct vm_struct *area;
+ unsigned long offset, last_addr;
+ pgprot_t prot;
+
+ /*
+ * Check if we can simply use the P4 segment. This area is
+ * uncacheable, so if caching/buffering is requested, we can't
+ * use it.
+ */
+ if ((phys_addr >= P4SEG) && (flags == 0))
+ return (void __iomem *)phys_addr;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ /*
+ * XXX: When mapping regular RAM, we'd better make damn sure
+ * it's never used for anything else. But this is really the
+ * caller's responsibility...
+ */
+ if (PHYSADDR(P2SEGADDR(phys_addr)) == phys_addr)
+ return (void __iomem *)P2SEGADDR(phys_addr);
+
+ /* Mappings have to be page-aligned */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_RW | _PAGE_DIRTY
+ | _PAGE_ACCESSED | _PAGE_TYPE_SMALL | flags);
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ area->phys_addr = phys_addr;
+ addr = (unsigned long )area->addr;
+ if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+
+ return (void __iomem *)(offset + (char *)addr);
+}
+EXPORT_SYMBOL(__ioremap);
+
+void __iounmap(void __iomem *addr)
+{
+ struct vm_struct *p;
+
+ if ((unsigned long)addr >= P4SEG)
+ return;
+ if (PXSEG(addr) == P2SEG)
+ return;
+
+ p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
+ if (unlikely(!p)) {
+ printk (KERN_ERR "iounmap: bad address %p\n", addr);
+ return;
+ }
+
+ kfree (p);
+}
+EXPORT_SYMBOL(__iounmap);
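
A hypothetical caller of __ioremap(); the physical base, window size, and register offset are invented. With flags == 0, P4 addresses and P2-reachable physical addresses are returned directly without creating a new mapping, as the shortcuts above show:

#include <linux/io.h>

#define EXAMPLE_PHYS    0xffe00000UL    /* made-up peripheral base */
#define EXAMPLE_CTRL    0x00            /* made-up register offset */

static int example_map_and_poke(void)
{
        void __iomem *regs = __ioremap(EXAMPLE_PHYS, 0x100, 0);

        if (!regs)
                return -ENOMEM;
        __raw_writel(1, regs + EXAMPLE_CTRL);   /* e.g. enable the block */
        __iounmap(regs);
        return 0;
}
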
diff --git a/kernel/arch/avr32/mm/tlb.c b/kernel/arch/avr32/mm/tlb.c
new file mode 100644
index 000000000..0da23109f
--- /dev/null
+++ b/kernel/arch/avr32/mm/tlb.c
@@ -0,0 +1,375 @@
+/*
+ * AVR32 TLB operations
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+
+#include <asm/mmu_context.h>
+
+/* TODO: Get the correct number from the CONFIG1 system register */
+#define NR_TLB_ENTRIES 32
+
+static void show_dtlb_entry(unsigned int index)
+{
+ u32 tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mmucr_save = sysreg_read(MMUCR);
+ tlbehi_save = sysreg_read(TLBEHI);
+ mmucr = SYSREG_BFINS(DRP, index, mmucr_save);
+ sysreg_write(MMUCR, mmucr);
+
+ __builtin_tlbr();
+ cpu_sync_pipeline();
+
+ tlbehi = sysreg_read(TLBEHI);
+ tlbelo = sysreg_read(TLBELO);
+
+ printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
+ index,
+ SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
+ SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
+ SYSREG_BFEXT(ASID, tlbehi),
+ SYSREG_BFEXT(VPN, tlbehi) >> 2,
+ SYSREG_BFEXT(PFN, tlbelo) >> 2,
+ SYSREG_BFEXT(AP, tlbelo),
+ SYSREG_BFEXT(SZ, tlbelo),
+ SYSREG_BFEXT(TLBELO_C, tlbelo) ? 'C' : ' ',
+ SYSREG_BFEXT(B, tlbelo) ? 'B' : ' ',
+ SYSREG_BFEXT(W, tlbelo) ? 'W' : ' ',
+ SYSREG_BFEXT(TLBELO_D, tlbelo) ? 'D' : ' ');
+
+ sysreg_write(MMUCR, mmucr_save);
+ sysreg_write(TLBEHI, tlbehi_save);
+ cpu_sync_pipeline();
+ local_irq_restore(flags);
+}
+
+void dump_dtlb(void)
+{
+ unsigned int i;
+
+ printk("ID V G ASID VPN PFN AP SZ C B W D\n");
+ for (i = 0; i < NR_TLB_ENTRIES; i++)
+ show_dtlb_entry(i);
+}
+
+static void update_dtlb(unsigned long address, pte_t pte)
+{
+ u32 tlbehi;
+ u32 mmucr;
+
+ /*
+ * We're not changing the ASID here, so no need to flush the
+ * pipeline.
+ */
+ tlbehi = sysreg_read(TLBEHI);
+ tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
+ tlbehi |= address & MMU_VPN_MASK;
+ tlbehi |= SYSREG_BIT(TLBEHI_V);
+ sysreg_write(TLBEHI, tlbehi);
+
+ /* Does this mapping already exist? */
+ __builtin_tlbs();
+ mmucr = sysreg_read(MMUCR);
+
+ if (mmucr & SYSREG_BIT(MMUCR_N)) {
+ /* Not found -- pick a not-recently-accessed entry */
+ unsigned int rp;
+ u32 tlbar = sysreg_read(TLBARLO);
+
+ rp = 32 - fls(tlbar);
+ if (rp == 32) {
+ rp = 0;
+ sysreg_write(TLBARLO, -1L);
+ }
+
+ mmucr = SYSREG_BFINS(DRP, rp, mmucr);
+ sysreg_write(MMUCR, mmucr);
+ }
+
+ sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);
+
+ /* Let's go */
+ __builtin_tlbw();
+}
+
+void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ unsigned long flags;
+
+ /* ptrace may call this routine */
+ if (vma && current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+ update_dtlb(address, *ptep);
+ local_irq_restore(flags);
+}
+
+static void __flush_tlb_page(unsigned long asid, unsigned long page)
+{
+ u32 mmucr, tlbehi;
+
+ /*
+ * Caller is responsible for masking out non-PFN bits in page
+ * and changing the current ASID if necessary. This means that
+ * we don't need to flush the pipeline after writing TLBEHI.
+ */
+ tlbehi = page | asid;
+ sysreg_write(TLBEHI, tlbehi);
+
+ __builtin_tlbs();
+ mmucr = sysreg_read(MMUCR);
+
+ if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
+ unsigned int entry;
+ u32 tlbarlo;
+
+ /* Clear the "valid" bit */
+ sysreg_write(TLBEHI, tlbehi);
+
+ /* mark the entry as "not accessed" */
+ entry = SYSREG_BFEXT(DRP, mmucr);
+ tlbarlo = sysreg_read(TLBARLO);
+ tlbarlo |= (0x80000000UL >> entry);
+ sysreg_write(TLBARLO, tlbarlo);
+
+ /* update the entry with valid bit clear */
+ __builtin_tlbw();
+ }
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+ unsigned long flags, asid;
+ unsigned long saved_asid = MMU_NO_ASID;
+
+ asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+ page &= PAGE_MASK;
+
+ local_irq_save(flags);
+ if (vma->vm_mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
+
+ __flush_tlb_page(asid, page);
+
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
+ local_irq_restore(flags);
+ }
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (mm->context != NO_CONTEXT) {
+ unsigned long flags;
+ int size;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+ if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
+ mm->context = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(mm);
+ } else {
+ unsigned long asid;
+ unsigned long saved_asid;
+
+ asid = mm->context & MMU_CONTEXT_ASID_MASK;
+ saved_asid = MMU_NO_ASID;
+
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+
+ if (mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
+
+ while (start < end) {
+ __flush_tlb_page(asid, start);
+ start += PAGE_SIZE;
+ }
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+/*
+ * This function depends on the pages to be flushed having the G
+ * (global) bit set in their pte. This is true for all
+ * PAGE_KERNEL(_RO) pages.
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+ int size;
+
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
+ flush_tlb_all();
+ } else {
+ unsigned long asid;
+
+ local_irq_save(flags);
+ asid = get_asid();
+
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+
+ while (start < end) {
+ __flush_tlb_page(asid, start);
+ start += PAGE_SIZE;
+ }
+ local_irq_restore(flags);
+ }
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ /* Invalidate all TLB entries of this process by getting a new ASID */
+ if (mm->context != NO_CONTEXT) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mm->context = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(mm);
+ local_irq_restore(flags);
+ }
+}
+
+void flush_tlb_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sysreg_write(MMUCR, sysreg_read(MMUCR) | SYSREG_BIT(MMUCR_I));
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+
+static void *tlb_start(struct seq_file *tlb, loff_t *pos)
+{
+ static unsigned long tlb_index;
+
+ if (*pos >= NR_TLB_ENTRIES)
+ return NULL;
+
+ tlb_index = 0;
+ return &tlb_index;
+}
+
+static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
+{
+ unsigned long *index = v;
+
+ if (*index >= NR_TLB_ENTRIES - 1)
+ return NULL;
+
+ ++*pos;
+ ++*index;
+ return index;
+}
+
+static void tlb_stop(struct seq_file *tlb, void *v)
+{
+
+}
+
+static int tlb_show(struct seq_file *tlb, void *v)
+{
+ unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
+ unsigned long flags;
+ unsigned long *index = v;
+
+ if (*index == 0)
+ seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n");
+
+ BUG_ON(*index >= NR_TLB_ENTRIES);
+
+ local_irq_save(flags);
+ mmucr_save = sysreg_read(MMUCR);
+ tlbehi_save = sysreg_read(TLBEHI);
+ mmucr = SYSREG_BFINS(DRP, *index, mmucr_save);
+ sysreg_write(MMUCR, mmucr);
+
+ /* TLBR might change the ASID */
+ __builtin_tlbr();
+ cpu_sync_pipeline();
+
+ tlbehi = sysreg_read(TLBEHI);
+ tlbelo = sysreg_read(TLBELO);
+
+ sysreg_write(MMUCR, mmucr_save);
+ sysreg_write(TLBEHI, tlbehi_save);
+ cpu_sync_pipeline();
+ local_irq_restore(flags);
+
+ seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
+ *index,
+ SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
+ SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
+ SYSREG_BFEXT(ASID, tlbehi),
+ SYSREG_BFEXT(VPN, tlbehi) >> 2,
+ SYSREG_BFEXT(PFN, tlbelo) >> 2,
+ SYSREG_BFEXT(AP, tlbelo),
+ SYSREG_BFEXT(SZ, tlbelo),
+ SYSREG_BFEXT(TLBELO_C, tlbelo) ? '1' : '0',
+ SYSREG_BFEXT(B, tlbelo) ? '1' : '0',
+ SYSREG_BFEXT(W, tlbelo) ? '1' : '0',
+ SYSREG_BFEXT(TLBELO_D, tlbelo) ? '1' : '0');
+
+ return 0;
+}
+
+static const struct seq_operations tlb_ops = {
+ .start = tlb_start,
+ .next = tlb_next,
+ .stop = tlb_stop,
+ .show = tlb_show,
+};
+
+static int tlb_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &tlb_ops);
+}
+
+static const struct file_operations proc_tlb_operations = {
+ .open = tlb_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init proctlb_init(void)
+{
+ proc_create("tlb", 0, NULL, &proc_tlb_operations);
+ return 0;
+}
+late_initcall(proctlb_init);
+#endif /* CONFIG_PROC_FS */
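
With CONFIG_PROC_FS enabled, the seq_file code above exposes the DTLB dump as /proc/tlb; a trivial userspace reader, for illustration:

#include <stdio.h>

int main(void)
{
        char line[128];
        FILE *f = fopen("/proc/tlb", "r");

        if (!f) {
                perror("/proc/tlb");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}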