author     José Pekkarinen <jose.pekkarinen@nokia.com>    2016-05-18 13:18:31 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>    2016-05-18 13:42:15 +0300
commit     437fd90c0250dee670290f9b714253671a990160 (patch)
tree       b871786c360704244a07411c69fb58da9ead4a06 /qemu/cputlb.c
parent     5bbd6fe9b8bab2a93e548c5a53b032d1939eec05 (diff)
These changes are the raw update to qemu-2.6.

Collisions occurred in the following patches:

  migration: do cleanup operation after completion (738df5b9)
  Bug fix. (1750c932f86)
  kvmclock: add a new function to update env->tsc. (b52baab2)

The code provided by those patches was already present in the upstreamed
version.

Change-Id: I3cc11841a6a76ae20887b2e245710199e1ea7f9a
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'qemu/cputlb.c'):

 qemu/cputlb.c | 182 ++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 140 insertions(+), 42 deletions(-)
diff --git a/qemu/cputlb.c b/qemu/cputlb.c
index a50608676..466663b56 100644
--- a/qemu/cputlb.c
+++ b/qemu/cputlb.c
@@ -17,7 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "config.h"
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
@@ -30,8 +30,30 @@
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
-//#define DEBUG_TLB
-//#define DEBUG_TLB_CHECK
+/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
+/* #define DEBUG_TLB */
+/* #define DEBUG_TLB_LOG */
+
+#ifdef DEBUG_TLB
+# define DEBUG_TLB_GATE 1
+# ifdef DEBUG_TLB_LOG
+# define DEBUG_TLB_LOG_GATE 1
+# else
+# define DEBUG_TLB_LOG_GATE 0
+# endif
+#else
+# define DEBUG_TLB_GATE 0
+# define DEBUG_TLB_LOG_GATE 0
+#endif
+
+#define tlb_debug(fmt, ...) do { \
+ if (DEBUG_TLB_LOG_GATE) { \
+ qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
+ ## __VA_ARGS__); \
+ } else if (DEBUG_TLB_GATE) { \
+ fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
+ } \
+} while (0)
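
Note on the gate pattern above: because DEBUG_TLB_GATE and DEBUG_TLB_LOG_GATE are always defined (to 0 or 1), the body of tlb_debug() is compiled and type-checked in every build, and the dead branch is removed by constant folding; the old "//#define DEBUG_TLB" approach left the debug calls entirely unseen by the compiler. A minimal sketch of a call site, using a hypothetical mmu_idx variable:

    /* The format arguments stay type-checked even when both gates are 0,
       in which case the whole statement folds away at compile time. */
    tlb_debug("flushing mmu_idx %d\n", mmu_idx);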
/* statistics */
int tlb_flush_count;
@@ -52,9 +74,8 @@ void tlb_flush(CPUState *cpu, int flush_global)
{
CPUArchState *env = cpu->env_ptr;
-#if defined(DEBUG_TLB)
- printf("tlb_flush:\n");
-#endif
+ tlb_debug("(%d)\n", flush_global);
+
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
cpu->current_tb = NULL;
@@ -69,6 +90,39 @@ void tlb_flush(CPUState *cpu, int flush_global)
tlb_flush_count++;
}
+static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
+{
+ CPUArchState *env = cpu->env_ptr;
+
+ tlb_debug("start\n");
+ /* must reset current TB so that interrupts cannot modify the
+ links while we are modifying them */
+ cpu->current_tb = NULL;
+
+ for (;;) {
+ int mmu_idx = va_arg(argp, int);
+
+ if (mmu_idx < 0) {
+ break;
+ }
+
+ tlb_debug("%d\n", mmu_idx);
+
+ memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
+ memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
+ }
+
+ memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+}
+
+void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+{
+ va_list argp;
+ va_start(argp, cpu);
+ v_tlb_flush_by_mmuidx(cpu, argp);
+ va_end(argp);
+}
+
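The new flush-by-index API is variadic: callers pass one or more MMU indexes and terminate the list with a negative value, which the mmu_idx < 0 check above detects. A hypothetical call site (the index values are illustrative, not from this patch):

    /* Flush the TLBs for MMU modes 0 and 1 on this vCPU only;
       the trailing -1 terminates the variadic index list. */
    tlb_flush_by_mmuidx(cpu, 0, 1, -1);
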
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
if (addr == (tlb_entry->addr_read &
@@ -87,16 +141,14 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
int i;
int mmu_idx;
-#if defined(DEBUG_TLB)
- printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
-#endif
+ tlb_debug("page :" TARGET_FMT_lx "\n", addr);
+
/* Check if we need to flush due to large pages. */
if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
-#if defined(DEBUG_TLB)
- printf("tlb_flush_page: forced full flush ("
- TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
- env->tlb_flush_addr, env->tlb_flush_mask);
-#endif
+ tlb_debug("forcing full flush ("
+ TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ env->tlb_flush_addr, env->tlb_flush_mask);
+
tlb_flush(cpu, 1);
return;
}
@@ -121,6 +173,54 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
tb_flush_jmp_cache(cpu, addr);
}
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
+{
+ CPUArchState *env = cpu->env_ptr;
+ int i, k;
+ va_list argp;
+
+ va_start(argp, addr);
+
+ tlb_debug("addr "TARGET_FMT_lx"\n", addr);
+
+ /* Check if we need to flush due to large pages. */
+ if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
+ tlb_debug("forced full flush ("
+ TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ env->tlb_flush_addr, env->tlb_flush_mask);
+
+ v_tlb_flush_by_mmuidx(cpu, argp);
+ va_end(argp);
+ return;
+ }
+ /* must reset current TB so that interrupts cannot modify the
+ links while we are modifying them */
+ cpu->current_tb = NULL;
+
+ addr &= TARGET_PAGE_MASK;
+ i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+
+ for (;;) {
+ int mmu_idx = va_arg(argp, int);
+
+ if (mmu_idx < 0) {
+ break;
+ }
+
+ tlb_debug("idx %d\n", mmu_idx);
+
+ tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
+
+ /* check whether there are vltb entries that need to be flushed */
+ for (k = 0; k < CPU_VTLB_SIZE; k++) {
+ tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
+ }
+ }
+ va_end(argp);
+
+ tb_flush_jmp_cache(cpu, addr);
+}
+
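tlb_flush_page_by_mmuidx() follows the same sentinel convention, flushing a single page from each listed MMU mode and falling back to a full per-index flush when the address matches a tracked large page. A hypothetical call, with an illustrative index:

    /* Drop one page's entries from MMU mode 0 only; -1 ends the list. */
    tlb_flush_page_by_mmuidx(cpu, addr, 0, -1);
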
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
@@ -165,27 +265,24 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
return ram_addr;
}
-void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
+void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
- CPUState *cpu;
CPUArchState *env;
- CPU_FOREACH(cpu) {
- int mmu_idx;
+ int mmu_idx;
- env = cpu->env_ptr;
- for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
- unsigned int i;
+ env = cpu->env_ptr;
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ unsigned int i;
- for (i = 0; i < CPU_TLB_SIZE; i++) {
- tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
- start1, length);
- }
+ for (i = 0; i < CPU_TLB_SIZE; i++) {
+ tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
+ start1, length);
+ }
- for (i = 0; i < CPU_VTLB_SIZE; i++) {
- tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
- start1, length);
- }
+ for (i = 0; i < CPU_VTLB_SIZE; i++) {
+ tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
+ start1, length);
}
}
}
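
With tlb_reset_dirty() now taking a CPUState, the iteration over all vCPUs moves out of this file. A sketch of what a caller would look like, assuming the CPU_FOREACH loop from the old cpu_tlb_reset_dirty_all() simply migrates to the call site:

    /* Hypothetical caller: walk every vCPU and reset the dirty
       ranges in its TLB, one CPU at a time. */
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }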
@@ -199,8 +296,9 @@ static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
/* update the TLB corresponding to virtual page vaddr
so that it is no longer dirty */
-void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
+void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
+ CPUArchState *env = cpu->env_ptr;
int i;
int mmu_idx;
@@ -261,6 +359,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
CPUTLBEntry *te;
hwaddr iotlb, xlat, sz;
unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
+ int asidx = cpu_asidx_from_attrs(cpu, attrs);
assert(size >= TARGET_PAGE_SIZE);
if (size != TARGET_PAGE_SIZE) {
@@ -268,15 +367,12 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
}
sz = size;
- section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
+ section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
assert(sz >= TARGET_PAGE_SIZE);
-#if defined(DEBUG_TLB)
- qemu_log_mask(CPU_LOG_MMU,
- "tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
- " prot=%x idx=%d\n",
- vaddr, paddr, prot, mmu_idx);
-#endif
+ tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
+ " prot=%x idx=%d\n",
+ vaddr, paddr, prot, mmu_idx);
address = vaddr;
if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
@@ -320,8 +416,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
/* Write access calls the I/O callback. */
te->addr_write = address | TLB_MMIO;
} else if (memory_region_is_ram(section->mr)
- && cpu_physical_memory_is_clean(section->mr->ram_addr
- + xlat)) {
+ && cpu_physical_memory_is_clean(
+ memory_region_get_ram_addr(section->mr) + xlat)) {
te->addr_write = address | TLB_NOTDIRTY;
} else {
te->addr_write = address;
@@ -353,15 +449,17 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
void *p;
MemoryRegion *mr;
CPUState *cpu = ENV_GET_CPU(env1);
+ CPUIOTLBEntry *iotlbentry;
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- mmu_idx = cpu_mmu_index(env1);
+ mmu_idx = cpu_mmu_index(env1, true);
if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
(addr & TARGET_PAGE_MASK))) {
cpu_ldub_code(env1, addr);
}
- pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK;
- mr = iotlb_to_region(cpu, pd);
+ iotlbentry = &env1->iotlb[mmu_idx][page_index];
+ pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
+ mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
if (memory_region_is_unassigned(mr)) {
CPUClass *cc = CPU_GET_CLASS(cpu);
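
The hunk above reflects that each iotlb entry now carries the memory transaction attributes alongside the address, so iotlb_to_region() can resolve the section in the correct address space. Judging only from the fields this diff touches, the entry looks roughly like the sketch below (not necessarily the full upstream definition):

    /* Sketch inferred from iotlbentry->addr / iotlbentry->attrs above. */
    typedef struct CPUIOTLBEntry {
        hwaddr addr;      /* section index in low bits, offset above */
        MemTxAttrs attrs; /* attributes used for the region lookup */
    } CPUIOTLBEntry;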