path: root/qemu/cpus.c
author     José Pekkarinen <jose.pekkarinen@nokia.com>   2016-05-18 13:18:31 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>   2016-05-18 13:42:15 +0300
commit     437fd90c0250dee670290f9b714253671a990160 (patch)
tree       b871786c360704244a07411c69fb58da9ead4a06 /qemu/cpus.c
parent     5bbd6fe9b8bab2a93e548c5a53b032d1939eec05 (diff)
These changes are the raw update to qemu-2.6.
Collision happened in the following patches:

    migration: do cleanup operation after completion (738df5b9)
    Bug fix. (1750c932f86)
    kvmclock: add a new function to update env->tsc. (b52baab2)

The code provided by the patches was already in the upstreamed version.

Change-Id: I3cc11841a6a76ae20887b2e245710199e1ea7f9a
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'qemu/cpus.c')
-rw-r--r--  qemu/cpus.c | 407
1 file changed, 252 insertions(+), 155 deletions(-)
diff --git a/qemu/cpus.c b/qemu/cpus.c
index a822ce3d8..cbeb1f613 100644
--- a/qemu/cpus.c
+++ b/qemu/cpus.c
@@ -23,12 +23,13 @@
*/
/* Needed early for CONFIG_BSD etc. */
-#include "config-host.h"
+#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
+#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
@@ -42,6 +43,7 @@
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"
+#include "sysemu/replay.h"
#ifndef _WIN32
#include "qemu/compatfd.h"
@@ -69,6 +71,14 @@ static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;
+/* vcpu throttling controls */
+static QEMUTimer *throttle_timer;
+static unsigned int throttle_percentage;
+
+#define CPU_THROTTLE_PCT_MIN 1
+#define CPU_THROTTLE_PCT_MAX 99
+#define CPU_THROTTLE_TIMESLICE_NS 10000000
+
bool cpu_is_stopped(CPUState *cpu)
{
return cpu->stopped || !runstate_is_running();
@@ -145,7 +155,7 @@ int64_t cpu_get_icount_raw(void)
icount = timers_state.qemu_icount;
if (cpu) {
- if (!cpu_can_do_io(cpu)) {
+ if (!cpu->can_do_io) {
fprintf(stderr, "Bad icount read\n");
exit(1);
}
@@ -191,7 +201,7 @@ int64_t cpu_get_ticks(void)
ticks = timers_state.cpu_ticks_offset;
if (timers_state.cpu_ticks_enabled) {
- ticks += cpu_get_real_ticks();
+ ticks += cpu_get_host_ticks();
}
if (timers_state.cpu_ticks_prev > ticks) {
@@ -239,7 +249,7 @@ void cpu_enable_ticks(void)
/* Here, the real thing protected by the seqlock is cpu_clock_offset. */
seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (!timers_state.cpu_ticks_enabled) {
- timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
+ timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
timers_state.cpu_clock_offset -= get_clock();
timers_state.cpu_ticks_enabled = 1;
}
@@ -255,7 +265,7 @@ void cpu_disable_ticks(void)
/* Here, the real thing protected by the seqlock is cpu_clock_offset. */
seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (timers_state.cpu_ticks_enabled) {
- timers_state.cpu_ticks_offset += cpu_get_real_ticks();
+ timers_state.cpu_ticks_offset += cpu_get_host_ticks();
timers_state.cpu_clock_offset = cpu_get_clock_locked();
timers_state.cpu_ticks_enabled = 0;
}
@@ -266,7 +276,7 @@ void cpu_disable_ticks(void)
fairly approximate, so ignore small variation.
When the guest is idle real and virtual time will be aligned in
the IO wait loop. */
-#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
+#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
static void icount_adjust(void)
{
@@ -317,7 +327,7 @@ static void icount_adjust_vm(void *opaque)
{
timer_mod(icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- get_ticks_per_sec() / 10);
+ NANOSECONDS_PER_SECOND / 10);
icount_adjust();
}
@@ -326,18 +336,27 @@ static int64_t qemu_icount_round(int64_t count)
return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
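
The shift-based rounding above is a ceiling division by 2^icount_time_shift; a worked example with an assumed shift value:

/* Assuming icount_time_shift = 3 (one instruction per 8 ns):
 *   qemu_icount_round(10) = (10 + 8 - 1) >> 3 = 17 >> 3 = 2
 * and 2 instructions << 3 = 16 ns >= 10 ns, so a deadline is never
 * rounded down below the requested number of nanoseconds.
 */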
-static void icount_warp_rt(void *opaque)
+static void icount_warp_rt(void)
{
+ unsigned seq;
+ int64_t warp_start;
+
/* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
* changes from -1 to another value, so the race here is okay.
*/
- if (atomic_read(&vm_clock_warp_start) == -1) {
+ do {
+ seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
+ warp_start = vm_clock_warp_start;
+ } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
+
+ if (warp_start == -1) {
return;
}
seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (runstate_is_running()) {
- int64_t clock = cpu_get_clock_locked();
+ int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
+ cpu_get_clock_locked());
int64_t warp_delta;
warp_delta = clock - vm_clock_warp_start;
@@ -360,6 +379,14 @@ static void icount_warp_rt(void *opaque)
}
}
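
The reader loop added at the top of icount_warp_rt() is the standard seqlock read idiom. For reference, a minimal sketch of the matching writer side, mirroring what this function itself does under the lock:

/* Writer side of the same seqlock (sketch): the sequence count is bumped
 * so readers spinning in seqlock_read_begin()/seqlock_read_retry() re-run
 * any read that overlapped this critical section.
 */
seqlock_write_lock(&timers_state.vm_clock_seqlock);
vm_clock_warp_start = -1;       /* e.g. mark the pending warp as consumed */
seqlock_write_unlock(&timers_state.vm_clock_seqlock);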
+static void icount_timer_cb(void *opaque)
+{
+ /* No need for a checkpoint because the timer already synchronizes
+ * with CHECKPOINT_CLOCK_VIRTUAL_RT.
+ */
+ icount_warp_rt();
+}
+
void qtest_clock_warp(int64_t dest)
{
int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
@@ -381,39 +408,34 @@ void qtest_clock_warp(int64_t dest)
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
-void qemu_clock_warp(QEMUClockType type)
+void qemu_start_warp_timer(void)
{
int64_t clock;
int64_t deadline;
- /*
- * There are too many global variables to make the "warp" behavior
- * applicable to other clocks. But a clock argument removes the
- * need for if statements all over the place.
+ if (!use_icount) {
+ return;
+ }
+
+ /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
+ * do not fire, so computing the deadline does not make sense.
*/
- if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
+ if (!runstate_is_running()) {
return;
}
- if (icount_sleep) {
- /*
- * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
- * This ensures that the deadline for the timer is computed correctly
- * below.
- * This also makes sure that the insn counter is synchronized before
- * the CPU starts running, in case the CPU is woken by an event other
- * than the earliest QEMU_CLOCK_VIRTUAL timer.
- */
- icount_warp_rt(NULL);
- timer_del(icount_warp_timer);
+ /* warp clock deterministically in record/replay mode */
+ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
+ return;
}
+
if (!all_cpu_threads_idle()) {
return;
}
if (qtest_enabled()) {
/* When testing, qtest commands advance icount. */
- return;
+ return;
}
/* We want to use the earliest deadline from ALL vm_clocks */
@@ -469,6 +491,28 @@ void qemu_clock_warp(QEMUClockType type)
}
}
+static void qemu_account_warp_timer(void)
+{
+ if (!use_icount || !icount_sleep) {
+ return;
+ }
+
+ /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
+ * do not fire, so computing the deadline does not make sense.
+ */
+ if (!runstate_is_running()) {
+ return;
+ }
+
+ /* warp clock deterministically in record/replay mode */
+ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
+ return;
+ }
+
+ timer_del(icount_warp_timer);
+ icount_warp_rt();
+}
+
static bool icount_state_needed(void *opaque)
{
return use_icount;
@@ -505,10 +549,80 @@ static const VMStateDescription vmstate_timers = {
}
};
+static void cpu_throttle_thread(void *opaque)
+{
+ CPUState *cpu = opaque;
+ double pct;
+ double throttle_ratio;
+ long sleeptime_ns;
+
+ if (!cpu_throttle_get_percentage()) {
+ return;
+ }
+
+ pct = (double)cpu_throttle_get_percentage()/100;
+ throttle_ratio = pct / (1 - pct);
+ sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
+
+ qemu_mutex_unlock_iothread();
+ atomic_set(&cpu->throttle_thread_scheduled, 0);
+ g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
+ qemu_mutex_lock_iothread();
+}
+
+static void cpu_throttle_timer_tick(void *opaque)
+{
+ CPUState *cpu;
+ double pct;
+
+ /* Stop the timer if needed */
+ if (!cpu_throttle_get_percentage()) {
+ return;
+ }
+ CPU_FOREACH(cpu) {
+ if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
+ async_run_on_cpu(cpu, cpu_throttle_thread, cpu);
+ }
+ }
+
+ pct = (double)cpu_throttle_get_percentage()/100;
+ timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
+ CPU_THROTTLE_TIMESLICE_NS / (1-pct));
+}
+
+void cpu_throttle_set(int new_throttle_pct)
+{
+ /* Ensure throttle percentage is within valid range */
+ new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
+ new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
+
+ atomic_set(&throttle_percentage, new_throttle_pct);
+
+ timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
+ CPU_THROTTLE_TIMESLICE_NS);
+}
+
+void cpu_throttle_stop(void)
+{
+ atomic_set(&throttle_percentage, 0);
+}
+
+bool cpu_throttle_active(void)
+{
+ return (cpu_throttle_get_percentage() != 0);
+}
+
+int cpu_throttle_get_percentage(void)
+{
+ return atomic_read(&throttle_percentage);
+}
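
The throttle arithmetic above can be sanity-checked with a worked example, assuming a caller requests a 75% throttle:

/* Worked example, assuming cpu_throttle_set(75):
 *   pct            = 75 / 100.0          = 0.75
 *   throttle_ratio = 0.75 / (1 - 0.75)   = 3.0
 *   sleeptime_ns   = 3.0 * 10000000      = 30 ms asleep per tick
 *   timer period   = 10 ms / (1 - 0.75)  = 40 ms between ticks
 * Each 40 ms period is thus ~10 ms of execution plus 30 ms of sleep,
 * so the vCPU gets ~25% of wall-clock time: a 75% throttle.
 */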
+
void cpu_ticks_init(void)
{
seqlock_init(&timers_state.vm_clock_seqlock, NULL);
vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
+ throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
+ cpu_throttle_timer_tick, NULL);
}
void configure_icount(QemuOpts *opts, Error **errp)
@@ -527,13 +641,13 @@ void configure_icount(QemuOpts *opts, Error **errp)
icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
if (icount_sleep) {
icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
- icount_warp_rt, NULL);
+ icount_timer_cb, NULL);
}
icount_align_option = qemu_opt_get_bool(opts, "align", false);
if (icount_align_option && !icount_sleep) {
- error_setg(errp, "align=on and sleep=no are incompatible");
+ error_setg(errp, "align=on and sleep=off are incompatible");
}
if (strcmp(option, "auto") != 0) {
errno = 0;
@@ -546,7 +660,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
} else if (icount_align_option) {
error_setg(errp, "shift=auto and align=on are incompatible");
} else if (!icount_sleep) {
- error_setg(errp, "shift=auto and sleep=no are incompatible");
+ error_setg(errp, "shift=auto and sleep=off are incompatible");
}
use_icount = 2;
@@ -568,7 +682,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
icount_adjust_vm, NULL);
timer_mod(icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- get_ticks_per_sec() / 10);
+ NANOSECONDS_PER_SECOND / 10);
}
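
For reference, the checks above correspond to command lines like the following (an illustrative sketch, not an exhaustive list of -icount forms):

/* -icount shift=7,sleep=on             accepted
 * -icount shift=5,align=on,sleep=off   rejected: align=on with sleep=off
 * -icount shift=auto,align=on          rejected: shift=auto with align=on
 * -icount shift=auto,sleep=off         rejected: shift=auto with sleep=off
 */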
/***********************************************************/
@@ -616,15 +730,6 @@ void cpu_synchronize_all_post_init(void)
}
}
-void cpu_clean_all_dirty(void)
-{
- CPUState *cpu;
-
- CPU_FOREACH(cpu) {
- cpu_clean_state(cpu);
- }
-}
-
static int do_vm_stop(RunState state)
{
int ret = 0;
@@ -638,7 +743,7 @@ static int do_vm_stop(RunState state)
}
bdrv_drain_all();
- ret = bdrv_flush_all();
+ ret = blk_flush_all();
return ret;
}
@@ -661,14 +766,6 @@ static void cpu_handle_guest_debug(CPUState *cpu)
cpu->stopped = true;
}
-static void cpu_signal(int sig)
-{
- if (current_cpu) {
- cpu_exit(current_cpu);
- }
- exit_request = 1;
-}
-
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
@@ -781,29 +878,11 @@ static void qemu_kvm_init_cpu_signals(CPUState *cpu)
}
}
-static void qemu_tcg_init_cpu_signals(void)
-{
- sigset_t set;
- struct sigaction sigact;
-
- memset(&sigact, 0, sizeof(sigact));
- sigact.sa_handler = cpu_signal;
- sigaction(SIG_IPI, &sigact, NULL);
-
- sigemptyset(&set);
- sigaddset(&set, SIG_IPI);
- pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-}
-
#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
abort();
}
-
-static void qemu_tcg_init_cpu_signals(void)
-{
-}
#endif /* _WIN32 */
static QemuMutex qemu_global_mutex;
@@ -812,9 +891,6 @@ static unsigned iothread_requesting_mutex;
static QemuThread io_thread;
-static QemuThread *tcg_cpu_thread;
-static QemuCond *tcg_halt_cond;
-
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
@@ -845,6 +921,8 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
wi.func = func;
wi.data = data;
wi.free = false;
+
+ qemu_mutex_lock(&cpu->work_mutex);
if (cpu->queued_work_first == NULL) {
cpu->queued_work_first = &wi;
} else {
@@ -853,9 +931,10 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
cpu->queued_work_last = &wi;
wi.next = NULL;
wi.done = false;
+ qemu_mutex_unlock(&cpu->work_mutex);
qemu_cpu_kick(cpu);
- while (!wi.done) {
+ while (!atomic_mb_read(&wi.done)) {
CPUState *self_cpu = current_cpu;
qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
@@ -876,6 +955,8 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
wi->func = func;
wi->data = data;
wi->free = true;
+
+ qemu_mutex_lock(&cpu->work_mutex);
if (cpu->queued_work_first == NULL) {
cpu->queued_work_first = wi;
} else {
@@ -884,6 +965,7 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
cpu->queued_work_last = wi;
wi->next = NULL;
wi->done = false;
+ qemu_mutex_unlock(&cpu->work_mutex);
qemu_cpu_kick(cpu);
}
@@ -896,15 +978,23 @@ static void flush_queued_work(CPUState *cpu)
return;
}
- while ((wi = cpu->queued_work_first)) {
+ qemu_mutex_lock(&cpu->work_mutex);
+ while (cpu->queued_work_first != NULL) {
+ wi = cpu->queued_work_first;
cpu->queued_work_first = wi->next;
+ if (!cpu->queued_work_first) {
+ cpu->queued_work_last = NULL;
+ }
+ qemu_mutex_unlock(&cpu->work_mutex);
wi->func(wi->data);
- wi->done = true;
+ qemu_mutex_lock(&cpu->work_mutex);
if (wi->free) {
g_free(wi);
+ } else {
+ atomic_mb_set(&wi->done, true);
}
}
- cpu->queued_work_last = NULL;
+ qemu_mutex_unlock(&cpu->work_mutex);
qemu_cond_broadcast(&qemu_work_cond);
}
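
For context, here is a minimal, hypothetical caller of the work-queue API whose locking this hunk tightens; run_on_cpu() blocks on qemu_work_cond until the vCPU thread has executed the function, while async_run_on_cpu() queues the item and returns immediately:

/* Hypothetical usage sketch of the work-queue API. */
static void do_reset_work(void *data)
{
    cpu_reset(data);                 /* runs on the target vCPU thread */
}

static void reset_vcpu(CPUState *cpu)
{
    run_on_cpu(cpu, do_reset_work, cpu);          /* synchronous */
    /* async_run_on_cpu(cpu, do_reset_work, cpu);    fire-and-forget */
}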
@@ -913,21 +1003,16 @@ static void qemu_wait_io_event_common(CPUState *cpu)
if (cpu->stop) {
cpu->stop = false;
cpu->stopped = true;
- qemu_cond_signal(&qemu_pause_cond);
+ qemu_cond_broadcast(&qemu_pause_cond);
}
flush_queued_work(cpu);
cpu->thread_kicked = false;
}
-static void qemu_tcg_wait_io_event(void)
+static void qemu_tcg_wait_io_event(CPUState *cpu)
{
- CPUState *cpu;
-
while (all_cpu_threads_idle()) {
- /* Start accounting real time to the virtual clock if the CPUs
- are idle. */
- qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
- qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
+ qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
while (iothread_requesting_mutex) {
@@ -1041,7 +1126,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
rcu_register_thread();
qemu_mutex_lock_iothread();
- qemu_tcg_init_cpu_signals();
qemu_thread_get_self(cpu->thread);
CPU_FOREACH(cpu) {
@@ -1053,7 +1137,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
/* wait for initial kick-off after machine start */
while (first_cpu->stopped) {
- qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
+ qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
/* process any pending work */
CPU_FOREACH(cpu) {
@@ -1062,7 +1146,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
}
/* process any pending work */
- exit_request = 1;
+ atomic_mb_set(&exit_request, 1);
while (1) {
tcg_exec_all();
@@ -1074,7 +1158,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
}
- qemu_tcg_wait_io_event();
+ qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
}
return NULL;
@@ -1085,61 +1169,47 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
#ifndef _WIN32
int err;
+ if (cpu->thread_kicked) {
+ return;
+ }
+ cpu->thread_kicked = true;
err = pthread_kill(cpu->thread->thread, SIG_IPI);
if (err) {
fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
exit(1);
}
#else /* _WIN32 */
- if (!qemu_cpu_is_self(cpu)) {
- CONTEXT tcgContext;
-
- if (SuspendThread(cpu->hThread) == (DWORD)-1) {
- fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
- GetLastError());
- exit(1);
- }
-
- /* On multi-core systems, we are not sure that the thread is actually
- * suspended until we can get the context.
- */
- tcgContext.ContextFlags = CONTEXT_CONTROL;
- while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
- continue;
- }
-
- cpu_signal(0);
+ abort();
+#endif
+}
- if (ResumeThread(cpu->hThread) == (DWORD)-1) {
- fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
- GetLastError());
- exit(1);
- }
+static void qemu_cpu_kick_no_halt(void)
+{
+ CPUState *cpu;
+ /* Ensure whatever caused the exit has reached the CPU threads before
+ * writing exit_request.
+ */
+ atomic_mb_set(&exit_request, 1);
+ cpu = atomic_mb_read(&tcg_current_cpu);
+ if (cpu) {
+ cpu_exit(cpu);
}
-#endif
}
void qemu_cpu_kick(CPUState *cpu)
{
qemu_cond_broadcast(cpu->halt_cond);
- if (!tcg_enabled() && !cpu->thread_kicked) {
+ if (tcg_enabled()) {
+ qemu_cpu_kick_no_halt();
+ } else {
qemu_cpu_kick_thread(cpu);
- cpu->thread_kicked = true;
}
}
void qemu_cpu_kick_self(void)
{
-#ifndef _WIN32
assert(current_cpu);
-
- if (!current_cpu->thread_kicked) {
- qemu_cpu_kick_thread(current_cpu);
- current_cpu->thread_kicked = true;
- }
-#else
- abort();
-#endif
+ qemu_cpu_kick_thread(current_cpu);
}
bool qemu_cpu_is_self(CPUState *cpu)
@@ -1166,12 +1236,12 @@ void qemu_mutex_lock_iothread(void)
* TCG code execution.
*/
if (!tcg_enabled() || qemu_in_vcpu_thread() ||
- !first_cpu || !first_cpu->thread) {
+ !first_cpu || !first_cpu->created) {
qemu_mutex_lock(&qemu_global_mutex);
atomic_dec(&iothread_requesting_mutex);
} else {
if (qemu_mutex_trylock(&qemu_global_mutex)) {
- qemu_cpu_kick_thread(first_cpu);
+ qemu_cpu_kick_no_halt();
qemu_mutex_lock(&qemu_global_mutex);
}
atomic_dec(&iothread_requesting_mutex);
@@ -1251,8 +1321,8 @@ void resume_all_vcpus(void)
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
-
- tcg_cpu_address_space_init(cpu, cpu->as);
+ static QemuCond *tcg_halt_cond;
+ static QemuThread *tcg_cpu_thread;
/* share a single thread for all cpus with TCG */
if (!tcg_cpu_thread) {
@@ -1314,6 +1384,17 @@ void qemu_init_vcpu(CPUState *cpu)
cpu->nr_cores = smp_cores;
cpu->nr_threads = smp_threads;
cpu->stopped = true;
+
+ if (!cpu->as) {
+ /* If the target cpu hasn't set up any address spaces itself,
+ * give it the default one.
+ */
+ AddressSpace *as = address_space_init_shareable(cpu->memory,
+ "cpu-memory");
+ cpu->num_ases = 1;
+ cpu_address_space_init(cpu, as, 0);
+ }
+
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
} else if (tcg_enabled()) {
@@ -1329,7 +1410,7 @@ void cpu_stop_current(void)
current_cpu->stop = false;
current_cpu->stopped = true;
cpu_exit(current_cpu);
- qemu_cond_signal(&qemu_pause_cond);
+ qemu_cond_broadcast(&qemu_pause_cond);
}
}
@@ -1357,9 +1438,33 @@ int vm_stop_force_state(RunState state)
return vm_stop(state);
} else {
runstate_set(state);
+
+ bdrv_drain_all();
/* Make sure to return an error if the flush in a previous vm_stop()
* failed. */
- return bdrv_flush_all();
+ return blk_flush_all();
+ }
+}
+
+static int64_t tcg_get_icount_limit(void)
+{
+ int64_t deadline;
+
+ if (replay_mode != REPLAY_MODE_PLAY) {
+ deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+
+ /* Maintain prior (possibly buggy) behaviour where if no deadline
+ * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
+ * INT32_MAX nanoseconds ahead, we still use INT32_MAX
+ * nanoseconds.
+ */
+ if ((deadline < 0) || (deadline > INT32_MAX)) {
+ deadline = INT32_MAX;
+ }
+
+ return qemu_icount_round(deadline);
+ } else {
+ return replay_get_instructions();
}
}
@@ -1375,24 +1480,12 @@ static int tcg_cpu_exec(CPUState *cpu)
#endif
if (use_icount) {
int64_t count;
- int64_t deadline;
int decr;
timers_state.qemu_icount -= (cpu->icount_decr.u16.low
+ cpu->icount_extra);
cpu->icount_decr.u16.low = 0;
cpu->icount_extra = 0;
- deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
-
- /* Maintain prior (possibly buggy) behaviour where if no deadline
- * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
- * INT32_MAX nanoseconds ahead, we still use INT32_MAX
- * nanoseconds.
- */
- if ((deadline < 0) || (deadline > INT32_MAX)) {
- deadline = INT32_MAX;
- }
-
- count = qemu_icount_round(deadline);
+ count = tcg_get_icount_limit();
timers_state.qemu_icount += count;
decr = (count > 0xffff) ? 0xffff : count;
count -= decr;
@@ -1410,6 +1503,7 @@ static int tcg_cpu_exec(CPUState *cpu)
+ cpu->icount_extra);
cpu->icount_decr.u32 = 0;
cpu->icount_extra = 0;
+ replay_account_executed_instructions();
}
return ret;
}
@@ -1419,7 +1513,7 @@ static void tcg_exec_all(void)
int r;
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
- qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+ qemu_account_warp_timer();
if (next_cpu == NULL) {
next_cpu = first_cpu;
@@ -1440,7 +1534,9 @@ static void tcg_exec_all(void)
break;
}
}
- exit_request = 0;
+
+ /* Pairs with smp_wmb in qemu_cpu_kick. */
+ atomic_mb_set(&exit_request, 0);
}
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
@@ -1485,22 +1581,23 @@ CpuInfoList *qmp_query_cpus(Error **errp)
info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
- info->value->has_pc = true;
- info->value->pc = env->eip + env->segs[R_CS].base;
+ info->value->arch = CPU_INFO_ARCH_X86;
+ info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
- info->value->has_nip = true;
- info->value->nip = env->nip;
+ info->value->arch = CPU_INFO_ARCH_PPC;
+ info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
- info->value->has_pc = true;
- info->value->pc = env->pc;
- info->value->has_npc = true;
- info->value->npc = env->npc;
+ info->value->arch = CPU_INFO_ARCH_SPARC;
+ info->value->u.q_sparc.pc = env->pc;
+ info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
- info->value->has_PC = true;
- info->value->PC = env->active_tc.PC;
+ info->value->arch = CPU_INFO_ARCH_MIPS;
+ info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
- info->value->has_PC = true;
- info->value->PC = env->PC;
+ info->value->arch = CPU_INFO_ARCH_TRICORE;
+ info->value->u.tricore.PC = env->PC;
+#else
+ info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
/* XXX: waiting for the qapi to support GSList */
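
With CpuInfo now a QAPI union keyed on "arch", a query-cpus reply for an x86 guest would look roughly as follows (values invented for illustration):

/* Sketch of a QMP query-cpus reply after this change:
 * { "return": [ { "CPU": 0, "current": true, "halted": false,
 *                 "qom_path": "/machine/unattached/device[0]",
 *                 "thread_id": 1234,
 *                 "arch": "x86", "pc": 4294967280 } ] }
 * The per-arch program-counter field is selected by "arch" rather than
 * by the old optional has_pc/has_nip/has_PC members.
 */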