/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2002  Benedict Gaster
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

extern void __weak sh4__flush_region_init(void);

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

/*
 * The following group of functions deal with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}

static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without %1 it gets inexplicably wrong */
	__asm__ __volatile__ (
		"getcfg	%3, 0, %0\n\t"
		"or	%0, %2, %0\n\t"
		"putcfg	%3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	 * the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long)(signed long long)(signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;

	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/
	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}

	while (addr < end_addr) {
		/* Worth unrolling a little */
		__asm__ __volatile__("icbi %0,  0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}

	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
					    unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache, option (2) isn't
	   available because there are no physical tags, so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.  Because icbi is cheaper than ocbp on a cache hit, it
	   would be cheaper to use the selective code for a larger range than
	   is possible with the D-cache.  Just assume 64 pages for now as a
	   working figure. */
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;

			vma = find_vma(mm, aligned_start);
			if (!vma || (aligned_start <= vma->vm_end)) {
				/* Avoid getting stuck in an error condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS, i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active, in which case we might just
	   invalidate another process's I-cache entries: no worries, just a
	   performance drop for that process. */
	aligned_start = L1_CACHE_ALIGN(start);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
################################################################################
#
#    Licensed to the Apache Software Foundation (ASF) under one or more
#    contributor license agreements.  See the NOTICE file distributed with
#    this work for additional information regarding copyright ownership.
#    The ASF licenses this file to You under the Apache License, Version 2.0
#    (the "License"); you may not use this file except in compliance with
#    the License.  You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.
#
################################################################################

#
# Defines whether the start level should be respected during feature startup. The default value is true.
# Note that the default behavior for this property in Karaf 2.x is false (!).
#
# Be aware that this property is deprecated and will be removed in Karaf 4.0. If you need to
# set it to false, please do so only as a temporary solution!
#
#respectStartLvlDuringFeatureStartup=true


#
# Defines whether the start level should be respected during feature uninstall. The default value is true.
# If true, bundles are stopped in descending start-level order within a given feature.
#
#respectStartLvlDuringFeatureUninstall=true

#
# Comma separated list of features repositories to register by default
#
featuresRepositories = \
    mvn:org.opendaylight.integration/features-index/0.7.1/xml/features, \
    mvn:org.apache.karaf.features/framework/4.0.10/xml/features, \
    mvn:org.apache.karaf.features/standard/4.0.10/xml/features
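
#
# Additional repositories can be appended to the list above using the same
# Karaf mvn: URL form, mvn:<groupId>/<artifactId>/<version>/xml/features.
# The commented-out entry below is only a hypothetical, illustrative example
# (org.example coordinates are placeholders, not a real artifact):
#
#    mvn:org.example/example-features/1.0.0/xml/features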

#
# Comma separated list of features to install at startup
#
featuresBoot=config,standard,wrap,package,kar,ssh,management,odl-restconf-all,odl-aaa-authn,odl-netvirt-openstack,odl-mdsal-apidocs,odl-dlux-core,odl-netvirt-ui
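
#
# Boot features are typically only processed on the first (clean) start of the
# container; after that, additional features are usually installed from the
# Karaf console instead, e.g. (illustrative):
#   feature:install <feature-name>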

#
# Defines if the boot features are started in asynchronous mode (in a dedicated thread)
#
featuresBootAsynchronous=false

#
# Store cfg file for config element in feature
#
#configCfgStore=true
/*
 * The mm being torn down won't ever be active again, so any I-cache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
static void sh5_flush_cache_mm(void *unused)
{
	sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
static void sh5_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	sh64_dcache_purge_user_range(vma->vm_mm, start, end);
	sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.  This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
static void sh5_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long eaddr, pfn;

	vma = data->vma;
	eaddr = data->addr1;
	pfn = data->addr2;

	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

static void sh5_flush_dcache_page(void *page)
{
	sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
	wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
static void sh5_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;

	start = data->addr1;
	end = data->addr2;

	__flush_purge_region((void *)start, end);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

/*
 * For the address range [start,end), write back the data from the
 * D-cache and invalidate the corresponding region of the I-cache for the
 * current process.  Used to flush signal trampolines on the stack to
 * make them executable.
 */
static void sh5_flush_cache_sigtramp(void *vaddr)
{
	unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;

	__flush_wback_region(vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
}

void __init sh5_cache_init(void)
{
	local_flush_cache_all		= sh5_flush_cache_all;
	local_flush_cache_mm		= sh5_flush_cache_mm;
	local_flush_cache_dup_mm	= sh5_flush_cache_mm;
	local_flush_cache_page		= sh5_flush_cache_page;
	local_flush_cache_range		= sh5_flush_cache_range;
	local_flush_dcache_page		= sh5_flush_dcache_page;
	local_flush_icache_range	= sh5_flush_icache_range;
	local_flush_cache_sigtramp	= sh5_flush_cache_sigtramp;

	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot = sh64_get_wired_dtlb_entry();

	sh4__flush_region_init();
}