Merge remote-tracking branch 'afaerber/qom-cpu' into staging
* afaerber/qom-cpu: (35 commits)
  target-i386: Pass X86CPU to kvm_handle_halt()
  target-i386: Pass X86CPU to kvm_get_mp_state()
  cpu: Move thread_id to CPUState
  cpus: Pass CPUState to run_on_cpu()
  target-i386: Pass X86CPU to cpu_x86_inject_mce()
  target-i386: Pass X86CPU to kvm_mce_inject()
  cpus: Pass CPUState to [qemu_]cpu_has_work()
  spapr: Pass PowerPCCPU to hypercalls
  spapr: Pass PowerPCCPU to spapr_hypercall()
  target-ppc: Pass PowerPCCPU to cpu_ppc_hypercall
  target-ppc: Pass PowerPCCPU to powerpc_excp()
  xtensa_pic: Pass XtensaCPU to xtensa_ccompare_cb()
  cpus: Pass CPUState to qemu_wait_io_event_common()
  cpus: Pass CPUState to flush_queued_work()
  cpu: Move queued_work_{first,last} to CPUState
  cpus: Pass CPUState to qemu_cpu_kick()
  target-ppc: Rename kvm_kick_{env => cpu} and pass PowerPCCPU
  ppc: Pass PowerPCCPU to {ppc6xx,ppc970,power7,ppc40x,ppce500}_set_irq()
  cpus: Pass CPUState to qemu_tcg_init_vcpu()
  cpus: Pass CPUState to qemu_tcg_cpu_thread_fn
  ...
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
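The series replaces per-target CPUArchState arguments with the QOM CPUState (or a concrete subclass such as X86CPU/PowerPCCPU) wherever only generic VCPU state is needed, and moves fields like thread_id, created, stop/stopped, halt_cond and the queued-work list into CPUState. A minimal sketch of the conversion pattern follows; it is illustrative only (the example_request_pause helpers are made up), with ENV_GET_CPU() bridging from an env pointer exactly as in the diff below:

    /* before: callers pass the per-target env and the flags live there */
    static void example_request_pause_old(CPUArchState *env)
    {
        env->stop = 1;
        qemu_cpu_kick(env);     /* old signature: void qemu_cpu_kick(void *_env) */
    }

    /* after: callers pass CPUState; the same flags now live in CPUState */
    static void example_request_pause_new(CPUState *cpu)
    {
        cpu->stop = true;
        qemu_cpu_kick(cpu);     /* new signature: void qemu_cpu_kick(CPUState *cpu) */
    }

    /* call sites that still only hold an env pointer bridge via ENV_GET_CPU() */
    void example_caller(CPUArchState *env)
    {
        example_request_pause_new(ENV_GET_CPU(env));
    }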
			
			
commit 98c8a73b2e
@@ -438,8 +438,6 @@ void cpu_reset_interrupt(CPUArchState *env, int mask);
 
 void cpu_exit(CPUArchState *s);
 
-bool qemu_cpu_has_work(CPUArchState *env);
-
 /* Breakpoint/watchpoint flags */
 #define BP_MEM_READ           0x01
 #define BP_MEM_WRITE          0x02
@@ -466,8 +464,6 @@ void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
 #define SSTEP_NOTIMER 0x4  /* Do not Timers while single stepping */
 
 void cpu_single_step(CPUArchState *env, int enabled);
-int cpu_is_stopped(CPUArchState *env);
-void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data);
 
 #if !defined(CONFIG_USER_ONLY)
 
@@ -201,15 +201,9 @@ typedef struct CPUWatchpoint {
     int nr_cores;  /* number of cores within this CPU package */        \
     int nr_threads;/* number of threads within this CPU */              \
     int running; /* Nonzero if cpu is currently running(usermode).  */  \
-    int thread_id;                                                      \
     /* user data */                                                     \
     void *opaque;                                                       \
                                                                         \
-    uint32_t created;                                                   \
-    uint32_t stop;   /* Stop request */                                 \
-    uint32_t stopped; /* Artificially stopped */                        \
-    struct QemuCond *halt_cond;                                         \
-    struct qemu_work_item *queued_work_first, *queued_work_last;        \
     const char *cpu_model_str;                                          \
     struct KVMState *kvm_state;                                         \
     struct kvm_run *kvm_run;                                            \
@@ -27,9 +27,9 @@ int tb_invalidated_flag;
 
 //#define CONFIG_DEBUG_EXEC
 
-bool qemu_cpu_has_work(CPUArchState *env)
+bool qemu_cpu_has_work(CPUState *cpu)
 {
-    return cpu_has_work(env);
+    return cpu_has_work(cpu);
 }
 
 void cpu_loop_exit(CPUArchState *env)
@@ -181,16 +181,14 @@ volatile sig_atomic_t exit_request;
 
 int cpu_exec(CPUArchState *env)
 {
-#ifdef TARGET_PPC
     CPUState *cpu = ENV_GET_CPU(env);
-#endif
     int ret, interrupt_request;
     TranslationBlock *tb;
     uint8_t *tc_ptr;
     tcg_target_ulong next_tb;
 
     if (env->halted) {
-        if (!cpu_has_work(env)) {
+        if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
         }
 
diff --git a/cpus.c b/cpus.c
@@ -64,13 +64,15 @@ static CPUArchState *next_cpu;
 
 static bool cpu_thread_is_idle(CPUArchState *env)
 {
-    if (env->stop || env->queued_work_first) {
+    CPUState *cpu = ENV_GET_CPU(env);
+
+    if (cpu->stop || cpu->queued_work_first) {
         return false;
     }
-    if (env->stopped || !runstate_is_running()) {
+    if (cpu->stopped || !runstate_is_running()) {
         return true;
     }
-    if (!env->halted || qemu_cpu_has_work(env) ||
+    if (!env->halted || qemu_cpu_has_work(cpu) ||
         kvm_async_interrupts_enabled()) {
         return false;
     }
@@ -428,9 +430,9 @@ void cpu_synchronize_all_post_init(void)
     }
 }
 
-int cpu_is_stopped(CPUArchState *env)
+bool cpu_is_stopped(CPUState *cpu)
 {
-    return !runstate_is_running() || env->stopped;
+    return !runstate_is_running() || cpu->stopped;
 }
 
 static void do_vm_stop(RunState state)
@@ -446,22 +448,24 @@ static void do_vm_stop(RunState state)
     }
 }
 
-static int cpu_can_run(CPUArchState *env)
+static bool cpu_can_run(CPUState *cpu)
 {
-    if (env->stop) {
-        return 0;
+    if (cpu->stop) {
+        return false;
     }
-    if (env->stopped || !runstate_is_running()) {
-        return 0;
+    if (cpu->stopped || !runstate_is_running()) {
+        return false;
     }
-    return 1;
+    return true;
 }
 
 static void cpu_handle_guest_debug(CPUArchState *env)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
+
     gdb_set_stop_cpu(env);
     qemu_system_debug_request();
-    env->stopped = 1;
+    cpu->stopped = true;
 }
 
 static void cpu_signal(int sig)
@@ -636,27 +640,27 @@ void qemu_init_cpu_loop(void)
     qemu_thread_get_self(&io_thread);
 }
 
-void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data)
+void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
 {
     struct qemu_work_item wi;
 
-    if (qemu_cpu_is_self(env)) {
+    if (qemu_cpu_is_self(cpu)) {
         func(data);
         return;
     }
 
     wi.func = func;
     wi.data = data;
-    if (!env->queued_work_first) {
-        env->queued_work_first = &wi;
+    if (cpu->queued_work_first == NULL) {
+        cpu->queued_work_first = &wi;
     } else {
-        env->queued_work_last->next = &wi;
+        cpu->queued_work_last->next = &wi;
     }
-    env->queued_work_last = &wi;
+    cpu->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
 
-    qemu_cpu_kick(env);
+    qemu_cpu_kick(cpu);
     while (!wi.done) {
         CPUArchState *self_env = cpu_single_env;
 
@@ -665,33 +669,31 @@ void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data)
     }
 }
 
-static void flush_queued_work(CPUArchState *env)
+static void flush_queued_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
 
-    if (!env->queued_work_first) {
+    if (cpu->queued_work_first == NULL) {
         return;
     }
 
-    while ((wi = env->queued_work_first)) {
-        env->queued_work_first = wi->next;
+    while ((wi = cpu->queued_work_first)) {
+        cpu->queued_work_first = wi->next;
         wi->func(wi->data);
         wi->done = true;
     }
-    env->queued_work_last = NULL;
+    cpu->queued_work_last = NULL;
     qemu_cond_broadcast(&qemu_work_cond);
 }
 
-static void qemu_wait_io_event_common(CPUArchState *env)
+static void qemu_wait_io_event_common(CPUState *cpu)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
-
-    if (env->stop) {
-        env->stop = 0;
-        env->stopped = 1;
+    if (cpu->stop) {
+        cpu->stop = false;
+        cpu->stopped = true;
         qemu_cond_signal(&qemu_pause_cond);
     }
-    flush_queued_work(env);
+    flush_queued_work(cpu);
     cpu->thread_kicked = false;
 }
 
@@ -711,18 +713,20 @@ static void qemu_tcg_wait_io_event(void)
     }
 
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        qemu_wait_io_event_common(env);
+        qemu_wait_io_event_common(ENV_GET_CPU(env));
     }
 }
 
 static void qemu_kvm_wait_io_event(CPUArchState *env)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
+
     while (cpu_thread_is_idle(env)) {
-        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
     }
 
     qemu_kvm_eat_signals(env);
-    qemu_wait_io_event_common(env);
+    qemu_wait_io_event_common(cpu);
 }
 
 static void *qemu_kvm_cpu_thread_fn(void *arg)
@@ -733,7 +737,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
 
     qemu_mutex_lock(&qemu_global_mutex);
     qemu_thread_get_self(cpu->thread);
-    env->thread_id = qemu_get_thread_id();
+    cpu->thread_id = qemu_get_thread_id();
     cpu_single_env = env;
 
     r = kvm_init_vcpu(env);
@@ -745,11 +749,11 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
     qemu_kvm_init_cpu_signals(env);
 
     /* signal CPU creation */
-    env->created = 1;
+    cpu->created = true;
     qemu_cond_signal(&qemu_cpu_cond);
 
     while (1) {
-        if (cpu_can_run(env)) {
+        if (cpu_can_run(cpu)) {
             r = kvm_cpu_exec(env);
             if (r == EXCP_DEBUG) {
                 cpu_handle_guest_debug(env);
@@ -774,13 +778,13 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
 
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
-    env->thread_id = qemu_get_thread_id();
+    cpu->thread_id = qemu_get_thread_id();
 
     sigemptyset(&waitset);
     sigaddset(&waitset, SIG_IPI);
 
     /* signal CPU creation */
-    env->created = 1;
+    cpu->created = true;
     qemu_cond_signal(&qemu_cpu_cond);
 
     cpu_single_env = env;
@@ -797,7 +801,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
         }
         qemu_mutex_lock_iothread();
         cpu_single_env = env;
-        qemu_wait_io_event_common(env);
+        qemu_wait_io_event_common(cpu);
     }
 
     return NULL;
@@ -808,8 +812,8 @@ static void tcg_exec_all(void);
 
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
-    CPUArchState *env = arg;
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUState *cpu = arg;
+    CPUArchState *env;
 
     qemu_tcg_init_cpu_signals();
     qemu_thread_get_self(cpu->thread);
@@ -817,18 +821,19 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     /* signal CPU creation */
     qemu_mutex_lock(&qemu_global_mutex);
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        env->thread_id = qemu_get_thread_id();
-        env->created = 1;
+        cpu = ENV_GET_CPU(env);
+        cpu->thread_id = qemu_get_thread_id();
+        cpu->created = true;
     }
     qemu_cond_signal(&qemu_cpu_cond);
 
     /* wait for initial kick-off after machine start */
-    while (first_cpu->stopped) {
+    while (ENV_GET_CPU(first_cpu)->stopped) {
         qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
 
         /* process any pending work */
         for (env = first_cpu; env != NULL; env = env->next_cpu) {
-            qemu_wait_io_event_common(env);
+            qemu_wait_io_event_common(ENV_GET_CPU(env));
         }
     }
 
@@ -843,9 +848,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     return NULL;
 }
 
-static void qemu_cpu_kick_thread(CPUArchState *env)
+static void qemu_cpu_kick_thread(CPUState *cpu)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
 #ifndef _WIN32
     int err;
 
@@ -855,7 +859,7 @@ static void qemu_cpu_kick_thread(CPUArchState *env)
         exit(1);
     }
 #else /* _WIN32 */
-    if (!qemu_cpu_is_self(env)) {
+    if (!qemu_cpu_is_self(cpu)) {
         SuspendThread(cpu->hThread);
         cpu_signal(0);
         ResumeThread(cpu->hThread);
@@ -863,14 +867,11 @@ static void qemu_cpu_kick_thread(CPUArchState *env)
 #endif
 }
 
-void qemu_cpu_kick(void *_env)
+void qemu_cpu_kick(CPUState *cpu)
 {
-    CPUArchState *env = _env;
-    CPUState *cpu = ENV_GET_CPU(env);
-
-    qemu_cond_broadcast(env->halt_cond);
+    qemu_cond_broadcast(cpu->halt_cond);
     if (!tcg_enabled() && !cpu->thread_kicked) {
-        qemu_cpu_kick_thread(env);
+        qemu_cpu_kick_thread(cpu);
         cpu->thread_kicked = true;
     }
 }
@@ -882,7 +883,7 @@ void qemu_cpu_kick_self(void)
     CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
 
     if (!cpu_single_cpu->thread_kicked) {
-        qemu_cpu_kick_thread(cpu_single_env);
+        qemu_cpu_kick_thread(cpu_single_cpu);
         cpu_single_cpu->thread_kicked = true;
     }
 #else
@@ -890,17 +891,14 @@ void qemu_cpu_kick_self(void)
 #endif
 }
 
-int qemu_cpu_is_self(void *_env)
+bool qemu_cpu_is_self(CPUState *cpu)
 {
-    CPUArchState *env = _env;
-    CPUState *cpu = ENV_GET_CPU(env);
-
     return qemu_thread_is_self(cpu->thread);
 }
 
 static bool qemu_in_vcpu_thread(void)
 {
-    return cpu_single_env && qemu_cpu_is_self(cpu_single_env);
+    return cpu_single_env && qemu_cpu_is_self(ENV_GET_CPU(cpu_single_env));
 }
 
 void qemu_mutex_lock_iothread(void)
@@ -910,7 +908,7 @@ void qemu_mutex_lock_iothread(void)
     } else {
         iothread_requesting_mutex = true;
         if (qemu_mutex_trylock(&qemu_global_mutex)) {
-            qemu_cpu_kick_thread(first_cpu);
+            qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu));
             qemu_mutex_lock(&qemu_global_mutex);
         }
         iothread_requesting_mutex = false;
@@ -928,7 +926,8 @@ static int all_vcpus_paused(void)
     CPUArchState *penv = first_cpu;
 
     while (penv) {
-        if (!penv->stopped) {
+        CPUState *pcpu = ENV_GET_CPU(penv);
+        if (!pcpu->stopped) {
             return 0;
         }
         penv = penv->next_cpu;
@@ -943,8 +942,9 @@ void pause_all_vcpus(void)
 
     qemu_clock_enable(vm_clock, false);
     while (penv) {
-        penv->stop = 1;
-        qemu_cpu_kick(penv);
+        CPUState *pcpu = ENV_GET_CPU(penv);
+        pcpu->stop = true;
+        qemu_cpu_kick(pcpu);
         penv = penv->next_cpu;
     }
 
@@ -952,8 +952,9 @@ void pause_all_vcpus(void)
         cpu_stop_current();
         if (!kvm_enabled()) {
             while (penv) {
-                penv->stop = 0;
-                penv->stopped = 1;
+                CPUState *pcpu = ENV_GET_CPU(penv);
+                pcpu->stop = 0;
+                pcpu->stopped = true;
                 penv = penv->next_cpu;
             }
             return;
@@ -964,7 +965,7 @@ void pause_all_vcpus(void)
         qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
         penv = first_cpu;
         while (penv) {
-            qemu_cpu_kick(penv);
+            qemu_cpu_kick(ENV_GET_CPU(penv));
             penv = penv->next_cpu;
         }
     }
@@ -976,36 +977,34 @@ void resume_all_vcpus(void)
 
     qemu_clock_enable(vm_clock, true);
     while (penv) {
-        penv->stop = 0;
-        penv->stopped = 0;
-        qemu_cpu_kick(penv);
+        CPUState *pcpu = ENV_GET_CPU(penv);
+        pcpu->stop = false;
+        pcpu->stopped = false;
+        qemu_cpu_kick(pcpu);
         penv = penv->next_cpu;
     }
 }
 
-static void qemu_tcg_init_vcpu(void *_env)
+static void qemu_tcg_init_vcpu(CPUState *cpu)
 {
-    CPUArchState *env = _env;
-    CPUState *cpu = ENV_GET_CPU(env);
-
     /* share a single thread for all cpus with TCG */
     if (!tcg_cpu_thread) {
         cpu->thread = g_malloc0(sizeof(QemuThread));
-        env->halt_cond = g_malloc0(sizeof(QemuCond));
-        qemu_cond_init(env->halt_cond);
-        tcg_halt_cond = env->halt_cond;
-        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, env,
+        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+        qemu_cond_init(cpu->halt_cond);
+        tcg_halt_cond = cpu->halt_cond;
+        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                            QEMU_THREAD_JOINABLE);
 #ifdef _WIN32
         cpu->hThread = qemu_thread_get_handle(cpu->thread);
 #endif
-        while (env->created == 0) {
+        while (!cpu->created) {
             qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
         }
         tcg_cpu_thread = cpu->thread;
     } else {
         cpu->thread = tcg_cpu_thread;
-        env->halt_cond = tcg_halt_cond;
+        cpu->halt_cond = tcg_halt_cond;
     }
 }
 
@@ -1014,11 +1013,11 @@ static void qemu_kvm_start_vcpu(CPUArchState *env)
     CPUState *cpu = ENV_GET_CPU(env);
 
     cpu->thread = g_malloc0(sizeof(QemuThread));
-    env->halt_cond = g_malloc0(sizeof(QemuCond));
-    qemu_cond_init(env->halt_cond);
+    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+    qemu_cond_init(cpu->halt_cond);
     qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
                        QEMU_THREAD_JOINABLE);
-    while (env->created == 0) {
+    while (!cpu->created) {
         qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
     }
 }
@@ -1028,11 +1027,11 @@ static void qemu_dummy_start_vcpu(CPUArchState *env)
     CPUState *cpu = ENV_GET_CPU(env);
 
     cpu->thread = g_malloc0(sizeof(QemuThread));
-    env->halt_cond = g_malloc0(sizeof(QemuCond));
-    qemu_cond_init(env->halt_cond);
+    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+    qemu_cond_init(cpu->halt_cond);
     qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, env,
                        QEMU_THREAD_JOINABLE);
-    while (env->created == 0) {
+    while (!cpu->created) {
         qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
     }
 }
@@ -1040,14 +1039,15 @@ static void qemu_dummy_start_vcpu(CPUArchState *env)
 void qemu_init_vcpu(void *_env)
 {
     CPUArchState *env = _env;
+    CPUState *cpu = ENV_GET_CPU(env);
 
     env->nr_cores = smp_cores;
     env->nr_threads = smp_threads;
-    env->stopped = 1;
+    cpu->stopped = true;
     if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
     } else if (tcg_enabled()) {
-        qemu_tcg_init_vcpu(env);
+        qemu_tcg_init_vcpu(cpu);
     } else {
         qemu_dummy_start_vcpu(env);
     }
@@ -1056,8 +1056,9 @@ void qemu_init_vcpu(void *_env)
 void cpu_stop_current(void)
 {
     if (cpu_single_env) {
-        cpu_single_env->stop = 0;
-        cpu_single_env->stopped = 1;
+        CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
+        cpu_single_cpu->stop = false;
+        cpu_single_cpu->stopped = true;
         cpu_exit(cpu_single_env);
         qemu_cond_signal(&qemu_pause_cond);
     }
@@ -1138,17 +1139,18 @@ static void tcg_exec_all(void)
     }
     for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
         CPUArchState *env = next_cpu;
+        CPUState *cpu = ENV_GET_CPU(env);
 
         qemu_clock_enable(vm_clock,
                           (env->singlestep_enabled & SSTEP_NOTIMER) == 0);
 
-        if (cpu_can_run(env)) {
+        if (cpu_can_run(cpu)) {
             r = tcg_cpu_exec(env);
             if (r == EXCP_DEBUG) {
                 cpu_handle_guest_debug(env);
                 break;
             }
-        } else if (env->stop || env->stopped) {
+        } else if (cpu->stop || cpu->stopped) {
             break;
         }
     }
@@ -1203,7 +1205,8 @@ CpuInfoList *qmp_query_cpus(Error **errp)
     CpuInfoList *head = NULL, *cur_item = NULL;
     CPUArchState *env;
 
-    for(env = first_cpu; env != NULL; env = env->next_cpu) {
+    for (env = first_cpu; env != NULL; env = env->next_cpu) {
+        CPUState *cpu = ENV_GET_CPU(env);
         CpuInfoList *info;
 
         cpu_synchronize_state(env);
@@ -1213,7 +1216,7 @@ CpuInfoList *qmp_query_cpus(Error **errp)
         info->value->CPU = env->cpu_index;
         info->value->current = (env == first_cpu);
         info->value->halted = env->halted;
-        info->value->thread_id = env->thread_id;
+        info->value->thread_id = cpu->thread_id;
 #if defined(TARGET_I386)
         info->value->has_pc = true;
         info->value->pc = env->eip + env->segs[R_CS].base;
diff --git a/exec.c b/exec.c
@@ -689,6 +689,9 @@ CPUArchState *qemu_get_cpu(int cpu)
 
 void cpu_exec_init(CPUArchState *env)
 {
+#ifndef CONFIG_USER_ONLY
+    CPUState *cpu = ENV_GET_CPU(env);
+#endif
     CPUArchState **penv;
     int cpu_index;
 
@@ -707,7 +710,7 @@ void cpu_exec_init(CPUArchState *env)
     QTAILQ_INIT(&env->breakpoints);
     QTAILQ_INIT(&env->watchpoints);
 #ifndef CONFIG_USER_ONLY
-    env->thread_id = qemu_get_thread_id();
+    cpu->thread_id = qemu_get_thread_id();
 #endif
     *penv = env;
 #if defined(CONFIG_USER_ONLY)
@@ -1693,6 +1696,7 @@ static void cpu_unlink_tb(CPUArchState *env)
 /* mask must never be zero, except for A20 change call */
 static void tcg_handle_interrupt(CPUArchState *env, int mask)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     int old_mask;
 
     old_mask = env->interrupt_request;
@@ -1702,8 +1706,8 @@ static void tcg_handle_interrupt(CPUArchState *env, int mask)
      * If called from iothread context, wake the target cpu in
      * case its halted.
      */
-    if (!qemu_cpu_is_self(env)) {
-        qemu_cpu_kick(env);
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
         return;
     }
 
diff --git a/hw/apic.c b/hw/apic.c
@@ -107,7 +107,7 @@ static void apic_sync_vapic(APICCommonState *s, int sync_type)
         length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);
 
         if (sync_type & SYNC_TO_VAPIC) {
-            assert(qemu_cpu_is_self(s->cpu_env));
+            assert(qemu_cpu_is_self(CPU(s->cpu)));
 
             vapic_state.tpr = s->tpr;
             vapic_state.enabled = 1;
@@ -151,15 +151,15 @@ static void apic_local_deliver(APICCommonState *s, int vector)
 
     switch ((lvt >> 8) & 7) {
     case APIC_DM_SMI:
-        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SMI);
+        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_SMI);
         break;
 
     case APIC_DM_NMI:
-        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_NMI);
+        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_NMI);
         break;
 
     case APIC_DM_EXTINT:
-        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
+        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_HARD);
         break;
 
     case APIC_DM_FIXED:
@@ -187,7 +187,7 @@ void apic_deliver_pic_intr(DeviceState *d, int level)
             reset_bit(s->irr, lvt & 0xff);
             /* fall through */
         case APIC_DM_EXTINT:
-            cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
+            cpu_reset_interrupt(&s->cpu->env, CPU_INTERRUPT_HARD);
             break;
         }
     }
@@ -248,18 +248,22 @@ static void apic_bus_deliver(const uint32_t *deliver_bitmask,
 
         case APIC_DM_SMI:
             foreach_apic(apic_iter, deliver_bitmask,
-                cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_SMI) );
+                cpu_interrupt(&apic_iter->cpu->env, CPU_INTERRUPT_SMI)
+            );
             return;
 
         case APIC_DM_NMI:
             foreach_apic(apic_iter, deliver_bitmask,
-                cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_NMI) );
+                cpu_interrupt(&apic_iter->cpu->env, CPU_INTERRUPT_NMI)
+            );
             return;
 
         case APIC_DM_INIT:
             /* normal INIT IPI sent to processors */
             foreach_apic(apic_iter, deliver_bitmask,
-                         cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_INIT) );
+                         cpu_interrupt(&apic_iter->cpu->env,
+                                       CPU_INTERRUPT_INIT)
+            );
             return;
 
         case APIC_DM_EXTINT:
@@ -293,7 +297,7 @@ static void apic_set_base(APICCommonState *s, uint64_t val)
     /* if disabled, cannot be enabled again */
     if (!(val & MSR_IA32_APICBASE_ENABLE)) {
         s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
-        cpu_clear_apic_feature(s->cpu_env);
+        cpu_clear_apic_feature(&s->cpu->env);
         s->spurious_vec &= ~APIC_SV_ENABLE;
     }
 }
@@ -359,13 +363,15 @@ static int apic_irq_pending(APICCommonState *s)
 /* signal the CPU if an irq is pending */
 static void apic_update_irq(APICCommonState *s)
 {
+    CPUState *cpu = CPU(s->cpu);
+
     if (!(s->spurious_vec & APIC_SV_ENABLE)) {
         return;
     }
-    if (!qemu_cpu_is_self(s->cpu_env)) {
-        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_POLL);
+    if (!qemu_cpu_is_self(cpu)) {
+        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_POLL);
     } else if (apic_irq_pending(s) > 0) {
-        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
+        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_HARD);
     }
 }
 
@@ -472,18 +478,18 @@ static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
 static void apic_startup(APICCommonState *s, int vector_num)
 {
     s->sipi_vector = vector_num;
-    cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);
+    cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_SIPI);
 }
 
 void apic_sipi(DeviceState *d)
 {
     APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
 
-    cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);
+    cpu_reset_interrupt(&s->cpu->env, CPU_INTERRUPT_SIPI);
 
     if (!s->wait_for_sipi)
         return;
-    cpu_x86_load_seg_cache_sipi(s->cpu_env, s->sipi_vector);
+    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
     s->wait_for_sipi = 0;
 }
 
@@ -672,7 +678,7 @@ static uint32_t apic_mem_readl(void *opaque, hwaddr addr)
     case 0x08:
         apic_sync_vapic(s, SYNC_FROM_VAPIC);
         if (apic_report_tpr_access) {
-            cpu_report_tpr_access(s->cpu_env, TPR_ACCESS_READ);
+            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
         }
         val = s->tpr;
         break;
@@ -774,7 +780,7 @@ static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
         break;
     case 0x08:
         if (apic_report_tpr_access) {
-            cpu_report_tpr_access(s->cpu_env, TPR_ACCESS_WRITE);
+            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
         s->tpr = val;
         apic_sync_vapic(s, SYNC_TO_VAPIC);
@@ -103,7 +103,7 @@ void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
 {
     APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
 
-    vapic_report_tpr_access(s->vapic, s->cpu_env, ip, access);
+    vapic_report_tpr_access(s->vapic, &s->cpu->env, ip, access);
 }
 
 void apic_report_irq_delivered(int delivered)
@@ -217,7 +217,7 @@ static void apic_reset_common(DeviceState *d)
     APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
     bool bsp;
 
-    bsp = cpu_is_bsp(x86_env_get_cpu(s->cpu_env));
+    bsp = cpu_is_bsp(s->cpu);
     s->apicbase = 0xfee00000 |
         (bsp ? MSR_IA32_APICBASE_BSP : 0) | MSR_IA32_APICBASE_ENABLE;
 
@@ -368,7 +368,6 @@ static const VMStateDescription vmstate_apic_common = {
 
 static Property apic_properties_common[] = {
     DEFINE_PROP_UINT8("id", APICCommonState, id, -1),
-    DEFINE_PROP_PTR("cpu_env", APICCommonState, cpu_env),
     DEFINE_PROP_BIT("vapic", APICCommonState, vapic_control, VAPIC_ENABLE_BIT,
                     true),
     DEFINE_PROP_END_OF_LIST(),
@@ -95,8 +95,9 @@ typedef struct APICCommonClass
 
 struct APICCommonState {
     SysBusDevice busdev;
 
     MemoryRegion io_memory;
-    void *cpu_env;
+    X86CPU *cpu;
     uint32_t apicbase;
     uint8_t id;
     uint8_t arb_id;
@@ -104,7 +104,7 @@ static void kvm_apic_enable_tpr_reporting(APICCommonState *s, bool enable)
         .enabled = enable
     };
 
-    kvm_vcpu_ioctl(s->cpu_env, KVM_TPR_ACCESS_REPORTING, &ctl);
+    kvm_vcpu_ioctl(&s->cpu->env, KVM_TPR_ACCESS_REPORTING, &ctl);
 }
 
 static void kvm_apic_vapic_base_update(APICCommonState *s)
@@ -114,7 +114,7 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
     };
     int ret;
 
-    ret = kvm_vcpu_ioctl(s->cpu_env, KVM_SET_VAPIC_ADDR, &vapid_addr);
+    ret = kvm_vcpu_ioctl(&s->cpu->env, KVM_SET_VAPIC_ADDR, &vapid_addr);
     if (ret < 0) {
         fprintf(stderr, "KVM: setting VAPIC address failed (%s)\n",
                 strerror(-ret));
@@ -125,7 +125,7 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
 static void do_inject_external_nmi(void *data)
 {
     APICCommonState *s = data;
-    CPUX86State *env = s->cpu_env;
+    CPUX86State *env = &s->cpu->env;
     uint32_t lvt;
     int ret;
 
@@ -143,7 +143,7 @@ static void do_inject_external_nmi(void *data)
 
 static void kvm_apic_external_nmi(APICCommonState *s)
 {
-    run_on_cpu(s->cpu_env, do_inject_external_nmi, s);
+    run_on_cpu(CPU(s->cpu), do_inject_external_nmi, s);
 }
 
 static uint64_t kvm_apic_mem_read(void *opaque, hwaddr addr,
@@ -475,11 +475,13 @@ static void vapic_enable_tpr_reporting(bool enable)
     VAPICEnableTPRReporting info = {
         .enable = enable,
     };
+    X86CPU *cpu;
     CPUX86State *env;
 
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
+        cpu = x86_env_get_cpu(env);
         info.apic = env->apic_state;
-        run_on_cpu(env, vapic_do_enable_tpr_reporting, &info);
+        run_on_cpu(CPU(cpu), vapic_do_enable_tpr_reporting, &info);
     }
 }
 
@@ -717,7 +719,7 @@ static int vapic_post_load(void *opaque, int version_id)
     }
     if (s->state == VAPIC_ACTIVE) {
         if (smp_cpus == 1) {
-            run_on_cpu(first_cpu, do_vapic_enable, s);
+            run_on_cpu(ENV_GET_CPU(first_cpu), do_vapic_enable, s);
         } else {
             zero = g_malloc0(s->rom_state.vapic_size);
             cpu_physical_memory_rw(s->vapic_paddr, zero,
									
								
								hw/pc.c
								
								
								
								
							
							
						
						
									
										56
									
								
								hw/pc.c
								
								
								
								
							| 
						 | 
				
			
			@ -71,8 +71,6 @@
 | 
			
		|||
#define FW_CFG_E820_TABLE (FW_CFG_ARCH_LOCAL + 3)
 | 
			
		||||
#define FW_CFG_HPET (FW_CFG_ARCH_LOCAL + 4)
 | 
			
		||||
 | 
			
		||||
#define MSI_ADDR_BASE 0xfee00000
 | 
			
		||||
 | 
			
		||||
#define E820_NR_ENTRIES		16
 | 
			
		||||
 | 
			
		||||
struct e820_entry {
 | 
			
		||||
| 
						 | 
				
			
			@ -849,35 +847,6 @@ DeviceState *cpu_get_current_apic(void)
 | 
			
		|||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static DeviceState *apic_init(void *env, uint8_t apic_id)
 | 
			
		||||
{
 | 
			
		||||
    DeviceState *dev;
 | 
			
		||||
    static int apic_mapped;
 | 
			
		||||
 | 
			
		||||
    if (kvm_irqchip_in_kernel()) {
 | 
			
		||||
        dev = qdev_create(NULL, "kvm-apic");
 | 
			
		||||
    } else if (xen_enabled()) {
 | 
			
		||||
        dev = qdev_create(NULL, "xen-apic");
 | 
			
		||||
    } else {
 | 
			
		||||
        dev = qdev_create(NULL, "apic");
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    qdev_prop_set_uint8(dev, "id", apic_id);
 | 
			
		||||
    qdev_prop_set_ptr(dev, "cpu_env", env);
 | 
			
		||||
    qdev_init_nofail(dev);
 | 
			
		||||
 | 
			
		||||
    /* XXX: mapping more APICs at the same memory location */
 | 
			
		||||
    if (apic_mapped == 0) {
 | 
			
		||||
        /* NOTE: the APIC is directly connected to the CPU - it is not
 | 
			
		||||
           on the global memory bus. */
 | 
			
		||||
        /* XXX: what if the base changes? */
 | 
			
		||||
        sysbus_mmio_map(sysbus_from_qdev(dev), 0, MSI_ADDR_BASE);
 | 
			
		||||
        apic_mapped = 1;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    return dev;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void pc_acpi_smi_interrupt(void *opaque, int irq, int level)
 | 
			
		||||
{
 | 
			
		||||
    CPUX86State *s = opaque;
 | 
			
		||||
| 
						 | 
				
			
			@ -887,24 +856,6 @@ void pc_acpi_smi_interrupt(void *opaque, int irq, int level)
 | 
			
		|||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static X86CPU *pc_new_cpu(const char *cpu_model)
 | 
			
		||||
{
 | 
			
		||||
    X86CPU *cpu;
 | 
			
		||||
    CPUX86State *env;
 | 
			
		||||
 | 
			
		||||
    cpu = cpu_x86_init(cpu_model);
 | 
			
		||||
    if (cpu == NULL) {
 | 
			
		||||
        fprintf(stderr, "Unable to find x86 CPU definition\n");
 | 
			
		||||
        exit(1);
 | 
			
		||||
    }
 | 
			
		||||
    env = &cpu->env;
 | 
			
		||||
    if ((env->cpuid_features & CPUID_APIC) || smp_cpus > 1) {
 | 
			
		||||
        env->apic_state = apic_init(env, env->cpuid_apic_id);
 | 
			
		||||
    }
 | 
			
		||||
    cpu_reset(CPU(cpu));
 | 
			
		||||
    return cpu;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void pc_cpus_init(const char *cpu_model)
 | 
			
		||||
{
 | 
			
		||||
    int i;
 | 
			
		||||
| 
						 | 
				
			
			@ -918,8 +869,11 @@ void pc_cpus_init(const char *cpu_model)
 | 
			
		|||
#endif
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    for(i = 0; i < smp_cpus; i++) {
 | 
			
		||||
        pc_new_cpu(cpu_model);
 | 
			
		||||
    for (i = 0; i < smp_cpus; i++) {
 | 
			
		||||
        if (!cpu_x86_init(cpu_model)) {
 | 
			
		||||
            fprintf(stderr, "Unable to find x86 CPU definition\n");
 | 
			
		||||
            exit(1);
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
diff --git a/hw/ppc.c b/hw/ppc.c
@@ -75,9 +75,10 @@ void ppc_set_irq(CPUPPCState *env, int n_IRQ, int level)
 }
 
 /* PowerPC 6xx / 7xx internal IRQ controller */
-static void ppc6xx_set_irq (void *opaque, int pin, int level)
+static void ppc6xx_set_irq(void *opaque, int pin, int level)
 {
-    CPUPPCState *env = opaque;
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
     int cur_level;
 
     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
@@ -151,17 +152,20 @@ static void ppc6xx_set_irq (void *opaque, int pin, int level)
     }
 }
 
-void ppc6xx_irq_init (CPUPPCState *env)
+void ppc6xx_irq_init(CPUPPCState *env)
 {
-    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, env,
+    PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
                                                   PPC6xx_INPUT_NB);
 }
 
 #if defined(TARGET_PPC64)
 /* PowerPC 970 internal IRQ controller */
-static void ppc970_set_irq (void *opaque, int pin, int level)
+static void ppc970_set_irq(void *opaque, int pin, int level)
 {
-    CPUPPCState *env = opaque;
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
     int cur_level;
 
     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
@@ -202,7 +206,7 @@ static void ppc970_set_irq (void *opaque, int pin, int level)
             } else {
                 LOG_IRQ("%s: restart the CPU\n", __func__);
                 env->halted = 0;
-                qemu_cpu_kick(env);
+                qemu_cpu_kick(CPU(cpu));
             }
             break;
         case PPC970_INPUT_HRESET:
@@ -233,16 +237,19 @@ static void ppc970_set_irq (void *opaque, int pin, int level)
     }
 }
 
-void ppc970_irq_init (CPUPPCState *env)
+void ppc970_irq_init(CPUPPCState *env)
 {
-    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, env,
+    PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
                                                   PPC970_INPUT_NB);
 }
 
 /* POWER7 internal IRQ controller */
-static void power7_set_irq (void *opaque, int pin, int level)
+static void power7_set_irq(void *opaque, int pin, int level)
 {
-    CPUPPCState *env = opaque;
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
 
     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                 env, pin, level);
@@ -266,17 +273,20 @@ static void power7_set_irq (void *opaque, int pin, int level)
     }
 }
 
-void ppcPOWER7_irq_init (CPUPPCState *env)
+void ppcPOWER7_irq_init(CPUPPCState *env)
 {
-    env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, env,
+    PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+    env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
                                                   POWER7_INPUT_NB);
 }
 #endif /* defined(TARGET_PPC64) */
 
 /* PowerPC 40x internal IRQ controller */
-static void ppc40x_set_irq (void *opaque, int pin, int level)
+static void ppc40x_set_irq(void *opaque, int pin, int level)
 {
-    CPUPPCState *env = opaque;
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
     int cur_level;
 
     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
@@ -325,7 +335,7 @@ static void ppc40x_set_irq (void *opaque, int pin, int level)
             } else {
                 LOG_IRQ("%s: restart the CPU\n", __func__);
                 env->halted = 0;
-                qemu_cpu_kick(env);
+                qemu_cpu_kick(CPU(cpu));
             }
             break;
         case PPC40x_INPUT_DEBUG:
@@ -346,16 +356,19 @@ static void ppc40x_set_irq (void *opaque, int pin, int level)
     }
 }
 
-void ppc40x_irq_init (CPUPPCState *env)
+void ppc40x_irq_init(CPUPPCState *env)
 {
+    PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
     env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
-                                                  env, PPC40x_INPUT_NB);
+                                                  cpu, PPC40x_INPUT_NB);
 }
 
 /* PowerPC E500 internal IRQ controller */
-static void ppce500_set_irq (void *opaque, int pin, int level)
+static void ppce500_set_irq(void *opaque, int pin, int level)
 {
-    CPUPPCState *env = opaque;
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
     int cur_level;
 
     LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
@@ -407,10 +420,12 @@ static void ppce500_set_irq (void *opaque, int pin, int level)
     }
 }
 
-void ppce500_irq_init (CPUPPCState *env)
+void ppce500_irq_init(CPUPPCState *env)
 {
+    PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
     env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
-                                        env, PPCE500_INPUT_NB);
+                                                  cpu, PPCE500_INPUT_NB);
 }
 /*****************************************************************************/
 /* PowerPC time base and decrementer emulation */
			@ -49,7 +49,7 @@ typedef struct spin_state {
 | 
			
		|||
} SpinState;
 | 
			
		||||
 | 
			
		||||
typedef struct spin_kick {
 | 
			
		||||
    CPUPPCState *env;
 | 
			
		||||
    PowerPCCPU *cpu;
 | 
			
		||||
    SpinInfo *spin;
 | 
			
		||||
} SpinKick;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -92,7 +92,8 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
 | 
			
		|||
static void spin_kick(void *data)
 | 
			
		||||
{
 | 
			
		||||
    SpinKick *kick = data;
 | 
			
		||||
    CPUPPCState *env = kick->env;
 | 
			
		||||
    CPUState *cpu = CPU(kick->cpu);
 | 
			
		||||
    CPUPPCState *env = &kick->cpu->env;
 | 
			
		||||
    SpinInfo *curspin = kick->spin;
 | 
			
		||||
    hwaddr map_size = 64 * 1024 * 1024;
 | 
			
		||||
    hwaddr map_start;
 | 
			
		||||
| 
						 | 
				
			
			@ -113,8 +114,8 @@ static void spin_kick(void *data)
 | 
			
		|||
 | 
			
		||||
    env->halted = 0;
 | 
			
		||||
    env->exception_index = -1;
 | 
			
		||||
    env->stopped = 0;
 | 
			
		||||
    qemu_cpu_kick(env);
 | 
			
		||||
    cpu->stopped = false;
 | 
			
		||||
    qemu_cpu_kick(cpu);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void spin_write(void *opaque, hwaddr addr, uint64_t value,
 | 
			
		||||
| 
						 | 
				
			
			@ -158,11 +159,11 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value,
 | 
			
		|||
    if (!(ldq_p(&curspin->addr) & 1)) {
 | 
			
		||||
        /* run CPU */
 | 
			
		||||
        SpinKick kick = {
 | 
			
		||||
            .env = env,
 | 
			
		||||
            .cpu = ppc_env_get_cpu(env),
 | 
			
		||||
            .spin = curspin,
 | 
			
		||||
        };
 | 
			
		||||
 | 
			
		||||
        run_on_cpu(env, spin_kick, &kick);
 | 
			
		||||
        run_on_cpu(CPU(kick.cpu), spin_kick, &kick);
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
@@ -576,13 +576,15 @@ static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
     return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
 }
 
-static void emulate_spapr_hypercall(CPUPPCState *env)
+static void emulate_spapr_hypercall(PowerPCCPU *cpu)
 {
+    CPUPPCState *env = &cpu->env;
+
     if (msr_pr) {
         hcall_dprintf("Hypercall made with MSR[PR]=1\n");
         env->gpr[3] = H_PRIVILEGE;
     } else {
-        env->gpr[3] = spapr_hypercall(env, env->gpr[3], &env->gpr[4]);
+        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
     }
 }
 
@@ -286,12 +286,12 @@ extern sPAPREnvironment *spapr;
     do { } while (0)
 #endif
 
-typedef target_ulong (*spapr_hcall_fn)(CPUPPCState *env, sPAPREnvironment *spapr,
+typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                        target_ulong opcode,
                                        target_ulong *args);
 
 void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn);
-target_ulong spapr_hypercall(CPUPPCState *env, target_ulong opcode,
+target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                              target_ulong *args);
 
 int spapr_allocate_irq(int hint, bool lsi);
@@ -75,9 +75,10 @@ static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
     return rb;
 }
 
-static target_ulong h_enter(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                             target_ulong opcode, target_ulong *args)
 {
+    CPUPPCState *env = &cpu->env;
     target_ulong flags = args[0];
     target_ulong pte_index = args[1];
     target_ulong pteh = args[2];
@@ -192,9 +193,10 @@ static target_ulong remove_hpte(CPUPPCState *env, target_ulong ptex,
     return REMOVE_SUCCESS;
 }
 
-static target_ulong h_remove(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
 {
+    CPUPPCState *env = &cpu->env;
     target_ulong flags = args[0];
     target_ulong pte_index = args[1];
     target_ulong avpn = args[2];
@@ -238,9 +240,10 @@ static target_ulong h_remove(CPUPPCState *env, sPAPREnvironment *spapr,
 
 #define H_BULK_REMOVE_MAX_BATCH        4
 
-static target_ulong h_bulk_remove(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
 {
+    CPUPPCState *env = &cpu->env;
     int i;
 
     for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
@@ -284,9 +287,10 @@ static target_ulong h_bulk_remove(CPUPPCState *env, sPAPREnvironment *spapr,
     return H_SUCCESS;
 }
 
-static target_ulong h_protect(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
 {
+    CPUPPCState *env = &cpu->env;
     target_ulong flags = args[0];
     target_ulong pte_index = args[1];
     target_ulong avpn = args[2];
@@ -321,7 +325,7 @@ static target_ulong h_protect(CPUPPCState *env, sPAPREnvironment *spapr,
     return H_SUCCESS;
 }
 
-static target_ulong h_set_dabr(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                target_ulong opcode, target_ulong *args)
 {
     /* FIXME: actually implement this */
@@ -457,7 +461,7 @@ static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
     return H_SUCCESS;
 }
 
-static target_ulong h_register_vpa(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
 {
     target_ulong flags = args[0];
@@ -505,12 +509,14 @@ static target_ulong h_register_vpa(CPUPPCState *env, sPAPREnvironment *spapr,
     return ret;
 }
 
-static target_ulong h_cede(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_cede(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
 {
+    CPUPPCState *env = &cpu->env;
+
     env->msr |= (1ULL << MSR_EE);
     hreg_compute_hflags(env);
-    if (!cpu_has_work(env)) {
+    if (!cpu_has_work(CPU(cpu))) {
         env->halted = 1;
         env->exception_index = EXCP_HLT;
         env->exit_request = 1;
@@ -518,7 +524,7 @@ static target_ulong h_cede(CPUPPCState *env, sPAPREnvironment *spapr,
     return H_SUCCESS;
 }
 
-static target_ulong h_rtas(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_rtas(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
 {
     target_ulong rtas_r3 = args[0];
@@ -530,7 +536,7 @@ static target_ulong h_rtas(CPUPPCState *env, sPAPREnvironment *spapr,
                            nret, rtas_r3 + 12 + 4*nargs);
 }
 
-static target_ulong h_logical_load(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
 {
     target_ulong size = args[0];
@@ -553,7 +559,7 @@ static target_ulong h_logical_load(CPUPPCState *env, sPAPREnvironment *spapr,
     return H_PARAMETER;
 }
 
-static target_ulong h_logical_store(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                     target_ulong opcode, target_ulong *args)
 {
     target_ulong size = args[0];
@@ -577,7 +583,7 @@ static target_ulong h_logical_store(CPUPPCState *env, sPAPREnvironment *spapr,
     return H_PARAMETER;
 }
 
-static target_ulong h_logical_memop(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                     target_ulong opcode, target_ulong *args)
 {
     target_ulong dst   = args[0]; /* Destination address */
@@ -644,14 +650,14 @@ static target_ulong h_logical_memop(CPUPPCState *env, sPAPREnvironment *spapr,
     return H_SUCCESS;
 }
 
-static target_ulong h_logical_icbi(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
 {
     /* Nothing to do on emulation, KVM will trap this in the kernel */
     return H_SUCCESS;
 }
 
-static target_ulong h_logical_dcbf(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
 {
     /* Nothing to do on emulation, KVM will trap this in the kernel */
@@ -679,7 +685,7 @@ void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
     *slot = fn;
 }
 
-target_ulong spapr_hypercall(CPUPPCState *env, target_ulong opcode,
+target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                              target_ulong *args)
 {
     if ((opcode <= MAX_HCALL_OPCODE)
@@ -687,14 +693,14 @@ target_ulong spapr_hypercall(CPUPPCState *env, target_ulong opcode,
         spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];
 
         if (fn) {
-            return fn(env, spapr, opcode, args);
+            return fn(cpu, spapr, opcode, args);
         }
     } else if ((opcode >= KVMPPC_HCALL_BASE) &&
                (opcode <= KVMPPC_HCALL_MAX)) {
         spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
 
         if (fn) {
-            return fn(env, spapr, opcode, args);
+            return fn(cpu, spapr, opcode, args);
         }
     }
 
@@ -204,7 +204,7 @@ static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
     return H_SUCCESS;
 }
 
-static target_ulong h_put_tce(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
 {
     target_ulong liobn = args[0];
@@ -264,7 +264,7 @@ static int check_bd(VIOsPAPRVLANDevice *dev, vlan_bd_t bd,
     return 0;
 }
 
-static target_ulong h_register_logical_lan(CPUPPCState *env,
+static target_ulong h_register_logical_lan(PowerPCCPU *cpu,
                                            sPAPREnvironment *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
@@ -328,7 +328,7 @@ static target_ulong h_register_logical_lan(CPUPPCState *env,
 }
 
 
-static target_ulong h_free_logical_lan(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_free_logical_lan(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                        target_ulong opcode, target_ulong *args)
 {
     target_ulong reg = args[0];
@@ -349,7 +349,7 @@ static target_ulong h_free_logical_lan(CPUPPCState *env, sPAPREnvironment *spapr
     return H_SUCCESS;
 }
 
-static target_ulong h_add_logical_lan_buffer(CPUPPCState *env,
+static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
                                              sPAPREnvironment *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
@@ -398,7 +398,7 @@ static target_ulong h_add_logical_lan_buffer(CPUPPCState *env,
     return H_SUCCESS;
 }
 
-static target_ulong h_send_logical_lan(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_send_logical_lan(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                        target_ulong opcode, target_ulong *args)
 {
     target_ulong reg = args[0];
@@ -467,7 +467,7 @@ static target_ulong h_send_logical_lan(CPUPPCState *env, sPAPREnvironment *spapr
     return H_SUCCESS;
 }
 
-static target_ulong h_multicast_ctrl(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_multicast_ctrl(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                      target_ulong opcode, target_ulong *args)
 {
     target_ulong reg = args[0];
@@ -163,6 +163,7 @@ static void rtas_start_cpu(sPAPREnvironment *spapr,
                            uint32_t nret, target_ulong rets)
 {
     target_ulong id, start, r3;
+    CPUState *cpu;
     CPUPPCState *env;
 
     if (nargs != 3 || nret != 1) {
@@ -175,6 +176,8 @@ static void rtas_start_cpu(sPAPREnvironment *spapr,
     r3 = rtas_ld(args, 2);
 
     for (env = first_cpu; env; env = env->next_cpu) {
+        cpu = ENV_GET_CPU(env);
+
         if (env->cpu_index != id) {
             continue;
         }
@@ -194,7 +197,7 @@ static void rtas_start_cpu(sPAPREnvironment *spapr,
         env->gpr[3] = r3;
         env->halted = 0;
 
-        qemu_cpu_kick(env);
+        qemu_cpu_kick(cpu);
 
         rtas_st(rets, 0, 0);
         return;
@@ -161,7 +161,7 @@ static int vio_make_devnode(VIOsPAPRDevice *dev,
 /*
  * CRQ handling
  */
-static target_ulong h_reg_crq(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_reg_crq(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
 {
     target_ulong reg = args[0];
@@ -219,7 +219,7 @@ static target_ulong free_crq(VIOsPAPRDevice *dev)
     return H_SUCCESS;
 }
 
-static target_ulong h_free_crq(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_free_crq(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                target_ulong opcode, target_ulong *args)
 {
     target_ulong reg = args[0];
@@ -233,7 +233,7 @@ static target_ulong h_free_crq(CPUPPCState *env, sPAPREnvironment *spapr,
     return free_crq(dev);
 }
 
-static target_ulong h_send_crq(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_send_crq(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                target_ulong opcode, target_ulong *args)
 {
     target_ulong reg = args[0];
@@ -256,7 +256,7 @@ static target_ulong h_send_crq(CPUPPCState *env, sPAPREnvironment *spapr,
     return H_HARDWARE;
 }
 
-static target_ulong h_enable_crq(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_enable_crq(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                  target_ulong opcode, target_ulong *args)
 {
     target_ulong reg = args[0];
@@ -463,7 +463,7 @@ static int spapr_vio_busdev_init(DeviceState *qdev)
     return pc->init(dev);
 }
 
-static target_ulong h_vio_signal(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_vio_signal(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                  target_ulong opcode,
                                  target_ulong *args)
 {
@@ -70,7 +70,7 @@ static int spapr_vty_init(VIOsPAPRDevice *sdev)
 }
 
 /* Forward declaration */
-static target_ulong h_put_term_char(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_put_term_char(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                     target_ulong opcode, target_ulong *args)
 {
     target_ulong reg = args[0];
@@ -97,7 +97,7 @@ static target_ulong h_put_term_char(CPUPPCState *env, sPAPREnvironment *spapr,
     return H_SUCCESS;
 }
 
-static target_ulong h_get_term_char(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_get_term_char(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                     target_ulong opcode, target_ulong *args)
 {
     target_ulong reg = args[0];
@@ -259,7 +259,7 @@ static void cpu_kick_irq(SPARCCPU *cpu)
 
     env->halted = 0;
     cpu_check_irqs(env);
-    qemu_cpu_kick(env);
+    qemu_cpu_kick(CPU(cpu));
 }
 
 static void cpu_set_irq(void *opaque, int irq, int level)
@@ -317,7 +317,7 @@ static void cpu_kick_irq(SPARCCPU *cpu)
 
     env->halted = 0;
     cpu_check_irqs(env);
-    qemu_cpu_kick(env);
+    qemu_cpu_kick(CPU(cpu));
 }
 
 static void cpu_set_ivec_irq(void *opaque, int irq, int level)

hw/xics.c | 11
@@ -340,16 +340,17 @@ void xics_set_irq_type(struct icp_state *icp, int irq, bool lsi)
     icp->ics->irqs[irq - icp->ics->offset].lsi = lsi;
 }
 
-static target_ulong h_cppr(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
 {
+    CPUPPCState *env = &cpu->env;
     target_ulong cppr = args[0];
 
     icp_set_cppr(spapr->icp, env->cpu_index, cppr);
     return H_SUCCESS;
 }
 
-static target_ulong h_ipi(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
 {
     target_ulong server = args[0];
@@ -364,18 +365,20 @@ static target_ulong h_ipi(CPUPPCState *env, sPAPREnvironment *spapr,
 
 }
 
-static target_ulong h_xirr(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_xirr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
 {
+    CPUPPCState *env = &cpu->env;
     uint32_t xirr = icp_accept(spapr->icp->ss + env->cpu_index);
 
     args[0] = xirr;
     return H_SUCCESS;
 }
 
-static target_ulong h_eoi(CPUPPCState *env, sPAPREnvironment *spapr,
+static target_ulong h_eoi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
 {
+    CPUPPCState *env = &cpu->env;
     target_ulong xirr = args[0];
 
     icp_eoi(spapr->icp, env->cpu_index, xirr);
@@ -125,12 +125,13 @@ void xtensa_rearm_ccompare_timer(CPUXtensaState *env)
 
 static void xtensa_ccompare_cb(void *opaque)
 {
-    CPUXtensaState *env = opaque;
+    XtensaCPU *cpu = opaque;
+    CPUXtensaState *env = &cpu->env;
 
     if (env->halted) {
         env->halt_clock = qemu_get_clock_ns(vm_clock);
         xtensa_advance_ccount(env, env->wake_ccount - env->sregs[CCOUNT]);
-        if (!cpu_has_work(env)) {
+        if (!cpu_has_work(CPU(cpu))) {
             env->sregs[CCOUNT] = env->wake_ccount + 1;
             xtensa_rearm_ccompare_timer(env);
         }
@@ -139,12 +140,14 @@ static void xtensa_ccompare_cb(void *opaque)
 
 void xtensa_irq_init(CPUXtensaState *env)
 {
+    XtensaCPU *cpu = xtensa_env_get_cpu(env);
+
     env->irq_inputs = (void **)qemu_allocate_irqs(
             xtensa_set_irq, env, env->config->ninterrupt);
     if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT) &&
             env->config->nccompare > 0) {
         env->ccompare_timer =
-            qemu_new_timer_ns(vm_clock, &xtensa_ccompare_cb, env);
+            qemu_new_timer_ns(vm_clock, &xtensa_ccompare_cb, cpu);
     }
 }
 
@@ -54,6 +54,9 @@ typedef struct CPUClass {
 
 /**
  * CPUState:
+ * @created: Indicates whether the CPU thread has been successfully created.
+ * @stop: Indicates a pending stop request.
+ * @stopped: Indicates the CPU has been artificially stopped.
  *
  * State of one CPU core or thread.
  */
@@ -66,7 +69,13 @@ struct CPUState {
 #ifdef _WIN32
     HANDLE hThread;
 #endif
+    int thread_id;
+    struct QemuCond *halt_cond;
+    struct qemu_work_item *queued_work_first, *queued_work_last;
     bool thread_kicked;
+    bool created;
+    bool stop;
+    bool stopped;
 
     /* TODO Move common fields from CPUArchState here. */
 };
@@ -78,5 +87,54 @@ struct CPUState {
  */
 void cpu_reset(CPUState *cpu);
 
+/**
+ * qemu_cpu_has_work:
+ * @cpu: The vCPU to check.
+ *
+ * Checks whether the CPU has work to do.
+ *
+ * Returns: %true if the CPU has work, %false otherwise.
+ */
+bool qemu_cpu_has_work(CPUState *cpu);
+
+/**
+ * qemu_cpu_is_self:
+ * @cpu: The vCPU to check against.
+ *
+ * Checks whether the caller is executing on the vCPU thread.
+ *
+ * Returns: %true if called from @cpu's thread, %false otherwise.
+ */
+bool qemu_cpu_is_self(CPUState *cpu);
+
+/**
+ * qemu_cpu_kick:
+ * @cpu: The vCPU to kick.
+ *
+ * Kicks @cpu's thread.
+ */
+void qemu_cpu_kick(CPUState *cpu);
+
+/**
+ * cpu_is_stopped:
+ * @cpu: The CPU to check.
+ *
+ * Checks whether the CPU is stopped.
+ *
+ * Returns: %true if run state is not running or if artificially stopped;
+ * %false otherwise.
+ */
+bool cpu_is_stopped(CPUState *cpu);
+
+/**
+ * run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu.
+ */
+void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+
 
 #endif

kvm-all.c | 13
@@ -828,10 +828,12 @@ static MemoryListener kvm_io_listener = {
 
 static void kvm_handle_interrupt(CPUArchState *env, int mask)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
+
     env->interrupt_request |= mask;
 
-    if (!qemu_cpu_is_self(env)) {
-        qemu_cpu_kick(env);
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
     }
 }
 
@@ -1498,8 +1500,10 @@ static void do_kvm_cpu_synchronize_state(void *_env)
 
 void kvm_cpu_synchronize_state(CPUArchState *env)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
+
     if (!env->kvm_vcpu_dirty) {
-        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
+        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, env);
     }
 }
 
@@ -1785,6 +1789,7 @@ static void kvm_invoke_set_guest_debug(void *data)
 
 int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     struct kvm_set_guest_debug_data data;
 
     data.dbg.control = reinject_trap;
@@ -1795,7 +1800,7 @@ int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
     kvm_arch_update_guest_debug(env, &data.dbg);
     data.env = env;
 
-    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
+    run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
     return data.err;
 }
 
| 
						 | 
				
			
			@ -1988,7 +1988,8 @@ static void do_acl_remove(Monitor *mon, const QDict *qdict)
 | 
			
		|||
#if defined(TARGET_I386)
 | 
			
		||||
static void do_inject_mce(Monitor *mon, const QDict *qdict)
 | 
			
		||||
{
 | 
			
		||||
    CPUArchState *cenv;
 | 
			
		||||
    X86CPU *cpu;
 | 
			
		||||
    CPUX86State *cenv;
 | 
			
		||||
    int cpu_index = qdict_get_int(qdict, "cpu_index");
 | 
			
		||||
    int bank = qdict_get_int(qdict, "bank");
 | 
			
		||||
    uint64_t status = qdict_get_int(qdict, "status");
 | 
			
		||||
| 
						 | 
				
			
			@ -2001,8 +2002,9 @@ static void do_inject_mce(Monitor *mon, const QDict *qdict)
 | 
			
		|||
        flags |= MCE_INJECT_BROADCAST;
 | 
			
		||||
    }
 | 
			
		||||
    for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
 | 
			
		||||
        cpu = x86_env_get_cpu(cenv);
 | 
			
		||||
        if (cenv->cpu_index == cpu_index) {
 | 
			
		||||
            cpu_x86_inject_mce(mon, cenv, bank, status, mcg_status, addr, misc,
 | 
			
		||||
            cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
 | 
			
		||||
                               flags);
 | 
			
		||||
            break;
 | 
			
		||||
        }
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -324,9 +324,7 @@ void cpu_save(QEMUFile *f, void *opaque);
 | 
			
		|||
int cpu_load(QEMUFile *f, void *opaque, int version_id);
 | 
			
		||||
 | 
			
		||||
/* Unblock cpu */
 | 
			
		||||
void qemu_cpu_kick(void *env);
 | 
			
		||||
void qemu_cpu_kick_self(void);
 | 
			
		||||
int qemu_cpu_is_self(void *env);
 | 
			
		||||
 | 
			
		||||
/* work queue */
 | 
			
		||||
struct qemu_work_item {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -19,7 +19,7 @@
 | 
			
		|||
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
#include "cpu-qom.h"
 | 
			
		||||
#include "cpu.h"
 | 
			
		||||
#include "qemu-common.h"
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -510,8 +510,10 @@ static inline void cpu_set_tls(CPUAlphaState *env, target_ulong newtls)
 | 
			
		|||
}
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
static inline bool cpu_has_work(CPUAlphaState *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUAlphaState *env = &ALPHA_CPU(cpu)->env;
 | 
			
		||||
 | 
			
		||||
    /* Here we are checking to see if the CPU should wake up from HALT.
 | 
			
		||||
       We will have gotten into this state only for WTINT from PALmode.  */
 | 
			
		||||
    /* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -718,8 +718,10 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
 | 
			
		|||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline bool cpu_has_work(CPUARMState *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUARMState *env = &ARM_CPU(cpu)->env;
 | 
			
		||||
 | 
			
		||||
    return env->interrupt_request &
 | 
			
		||||
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -285,8 +285,10 @@ static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
 | 
			
		|||
#define cpu_list cris_cpu_list
 | 
			
		||||
void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf);
 | 
			
		||||
 | 
			
		||||
static inline bool cpu_has_work(CPUCRISState *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUCRISState *env = &CRIS_CPU(cpu)->env;
 | 
			
		||||
 | 
			
		||||
    return env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -37,6 +37,13 @@
 | 
			
		|||
#include <linux/kvm_para.h>
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#include "sysemu.h"
 | 
			
		||||
#ifndef CONFIG_USER_ONLY
 | 
			
		||||
#include "hw/xen.h"
 | 
			
		||||
#include "hw/sysbus.h"
 | 
			
		||||
#include "hw/apic_internal.h"
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
/* feature flags taken from "Intel Processor Identification and the CPUID
 | 
			
		||||
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 | 
			
		||||
 * between feature naming conventions, aliases may be added.
 | 
			
		||||
| 
						 | 
				
			
			@ -1427,7 +1434,8 @@ int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
 | 
			
		|||
        env->cpuid_svm_features &= TCG_SVM_FEATURES;
 | 
			
		||||
    }
 | 
			
		||||
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
 | 
			
		||||
    if (error_is_set(&error)) {
 | 
			
		||||
    if (error) {
 | 
			
		||||
        fprintf(stderr, "%s\n", error_get_pretty(error));
 | 
			
		||||
        error_free(error);
 | 
			
		||||
        return -1;
 | 
			
		||||
    }
 | 
			
		||||
| 
						 | 
				
			
			@ -1878,12 +1886,65 @@ static void mce_init(X86CPU *cpu)
 | 
			
		|||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#define MSI_ADDR_BASE 0xfee00000
 | 
			
		||||
 | 
			
		||||
#ifndef CONFIG_USER_ONLY
 | 
			
		||||
static void x86_cpu_apic_init(X86CPU *cpu, Error **errp)
 | 
			
		||||
{
 | 
			
		||||
    static int apic_mapped;
 | 
			
		||||
    CPUX86State *env = &cpu->env;
 | 
			
		||||
    APICCommonState *apic;
 | 
			
		||||
    const char *apic_type = "apic";
 | 
			
		||||
 | 
			
		||||
    if (kvm_irqchip_in_kernel()) {
 | 
			
		||||
        apic_type = "kvm-apic";
 | 
			
		||||
    } else if (xen_enabled()) {
 | 
			
		||||
        apic_type = "xen-apic";
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    env->apic_state = qdev_try_create(NULL, apic_type);
 | 
			
		||||
    if (env->apic_state == NULL) {
 | 
			
		||||
        error_setg(errp, "APIC device '%s' could not be created", apic_type);
 | 
			
		||||
        return;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    object_property_add_child(OBJECT(cpu), "apic",
 | 
			
		||||
                              OBJECT(env->apic_state), NULL);
 | 
			
		||||
    qdev_prop_set_uint8(env->apic_state, "id", env->cpuid_apic_id);
 | 
			
		||||
    /* TODO: convert to link<> */
 | 
			
		||||
    apic = APIC_COMMON(env->apic_state);
 | 
			
		||||
    apic->cpu = cpu;
 | 
			
		||||
 | 
			
		||||
    if (qdev_init(env->apic_state)) {
 | 
			
		||||
        error_setg(errp, "APIC device '%s' could not be initialized",
 | 
			
		||||
                   object_get_typename(OBJECT(env->apic_state)));
 | 
			
		||||
        return;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    /* XXX: mapping more APICs at the same memory location */
 | 
			
		||||
    if (apic_mapped == 0) {
 | 
			
		||||
        /* NOTE: the APIC is directly connected to the CPU - it is not
 | 
			
		||||
           on the global memory bus. */
 | 
			
		||||
        /* XXX: what if the base changes? */
 | 
			
		||||
        sysbus_mmio_map(sysbus_from_qdev(env->apic_state), 0, MSI_ADDR_BASE);
 | 
			
		||||
        apic_mapped = 1;
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
void x86_cpu_realize(Object *obj, Error **errp)
 | 
			
		||||
{
 | 
			
		||||
    X86CPU *cpu = X86_CPU(obj);
 | 
			
		||||
 | 
			
		||||
#ifndef CONFIG_USER_ONLY
 | 
			
		||||
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
 | 
			
		||||
 | 
			
		||||
    if (cpu->env.cpuid_features & CPUID_APIC || smp_cpus > 1) {
 | 
			
		||||
        x86_cpu_apic_init(cpu, errp);
 | 
			
		||||
        if (error_is_set(errp)) {
 | 
			
		||||
            return;
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
    mce_init(cpu);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -907,9 +907,11 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
 | 
			
		|||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env,
 | 
			
		||||
static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
 | 
			
		||||
                                               int sipi_vector)
 | 
			
		||||
{
 | 
			
		||||
    CPUX86State *env = &cpu->env;
 | 
			
		||||
 | 
			
		||||
    env->eip = 0;
 | 
			
		||||
    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
 | 
			
		||||
                           sipi_vector << 12,
 | 
			
		||||
| 
						 | 
				
			
			@ -1098,8 +1100,10 @@ static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
 | 
			
		|||
#include "hw/apic.h"
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
static inline bool cpu_has_work(CPUX86State *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUX86State *env = &X86_CPU(cpu)->env;
 | 
			
		||||
 | 
			
		||||
    return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
 | 
			
		||||
                                       CPU_INTERRUPT_POLL)) &&
 | 
			
		||||
            (env->eflags & IF_MASK)) ||
 | 
			
		||||
| 
						 | 
				
			
			@ -1131,7 +1135,7 @@ void do_cpu_sipi(X86CPU *cpu);
 | 
			
		|||
#define MCE_INJECT_BROADCAST    1
 | 
			
		||||
#define MCE_INJECT_UNCOND_AO    2
 | 
			
		||||
 | 
			
		||||
void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
 | 
			
		||||
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
 | 
			
		||||
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
 | 
			
		||||
                        uint64_t misc, int flags);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1141,10 +1141,11 @@ static void do_inject_x86_mce(void *data)
 | 
			
		|||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
 | 
			
		||||
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
 | 
			
		||||
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
 | 
			
		||||
                        uint64_t misc, int flags)
 | 
			
		||||
{
 | 
			
		||||
    CPUX86State *cenv = &cpu->env;
 | 
			
		||||
    MCEInjectionParams params = {
 | 
			
		||||
        .mon = mon,
 | 
			
		||||
        .env = cenv,
 | 
			
		||||
| 
						 | 
				
			
			@ -1176,7 +1177,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
 | 
			
		|||
        return;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    run_on_cpu(cenv, do_inject_x86_mce, ¶ms);
 | 
			
		||||
    run_on_cpu(CPU(cpu), do_inject_x86_mce, ¶ms);
 | 
			
		||||
    if (flags & MCE_INJECT_BROADCAST) {
 | 
			
		||||
        params.bank = 1;
 | 
			
		||||
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
 | 
			
		||||
| 
						 | 
				
			
			@ -1188,7 +1189,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
 | 
			
		|||
                continue;
 | 
			
		||||
            }
 | 
			
		||||
            params.env = env;
 | 
			
		||||
            run_on_cpu(cenv, do_inject_x86_mce, ¶ms);
 | 
			
		||||
            run_on_cpu(CPU(cpu), do_inject_x86_mce, ¶ms);
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			@ -1243,6 +1244,7 @@ X86CPU *cpu_x86_init(const char *cpu_model)
 | 
			
		|||
{
 | 
			
		||||
    X86CPU *cpu;
 | 
			
		||||
    CPUX86State *env;
 | 
			
		||||
    Error *error = NULL;
 | 
			
		||||
 | 
			
		||||
    cpu = X86_CPU(object_new(TYPE_X86_CPU));
 | 
			
		||||
    env = &cpu->env;
 | 
			
		||||
| 
						 | 
				
			
			@ -1253,8 +1255,12 @@ X86CPU *cpu_x86_init(const char *cpu_model)
 | 
			
		|||
        return NULL;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    x86_cpu_realize(OBJECT(cpu), NULL);
 | 
			
		||||
 | 
			
		||||
    x86_cpu_realize(OBJECT(cpu), &error);
 | 
			
		||||
    if (error) {
 | 
			
		||||
        error_free(error);
 | 
			
		||||
        object_delete(OBJECT(cpu));
 | 
			
		||||
        return NULL;
 | 
			
		||||
    }
 | 
			
		||||
    return cpu;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -229,8 +229,9 @@ static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
 | 
			
		|||
    return -ENOSYS;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void kvm_mce_inject(CPUX86State *env, hwaddr paddr, int code)
 | 
			
		||||
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
 | 
			
		||||
{
 | 
			
		||||
    CPUX86State *env = &cpu->env;
 | 
			
		||||
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
 | 
			
		||||
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
 | 
			
		||||
    uint64_t mcg_status = MCG_STATUS_MCIP;
 | 
			
		||||
| 
						 | 
				
			
			@ -242,7 +243,7 @@ static void kvm_mce_inject(CPUX86State *env, hwaddr paddr, int code)
 | 
			
		|||
        status |= 0xc0;
 | 
			
		||||
        mcg_status |= MCG_STATUS_RIPV;
 | 
			
		||||
    }
 | 
			
		||||
    cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
 | 
			
		||||
    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
 | 
			
		||||
                       (MCM_ADDR_PHYS << 6) | 0xc,
 | 
			
		||||
                       cpu_x86_support_mca_broadcast(env) ?
 | 
			
		||||
                       MCE_INJECT_BROADCAST : 0);
 | 
			
		||||
| 
						 | 
				
			
			@ -256,6 +257,7 @@ static void hardware_memory_error(void)
 | 
			
		|||
 | 
			
		||||
int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
 | 
			
		||||
{
 | 
			
		||||
    X86CPU *cpu = x86_env_get_cpu(env);
 | 
			
		||||
    ram_addr_t ram_addr;
 | 
			
		||||
    hwaddr paddr;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -273,7 +275,7 @@ int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
 | 
			
		|||
            }
 | 
			
		||||
        }
 | 
			
		||||
        kvm_hwpoison_page_add(ram_addr);
 | 
			
		||||
        kvm_mce_inject(env, paddr, code);
 | 
			
		||||
        kvm_mce_inject(cpu, paddr, code);
 | 
			
		||||
    } else {
 | 
			
		||||
        if (code == BUS_MCEERR_AO) {
 | 
			
		||||
            return 0;
 | 
			
		||||
| 
						 | 
				
			
			@ -301,7 +303,7 @@ int kvm_arch_on_sigbus(int code, void *addr)
 | 
			
		|||
            return 0;
 | 
			
		||||
        }
 | 
			
		||||
        kvm_hwpoison_page_add(ram_addr);
 | 
			
		||||
        kvm_mce_inject(first_cpu, paddr, code);
 | 
			
		||||
        kvm_mce_inject(x86_env_get_cpu(first_cpu), paddr, code);
 | 
			
		||||
    } else {
 | 
			
		||||
        if (code == BUS_MCEERR_AO) {
 | 
			
		||||
            return 0;
 | 
			
		||||
| 
						 | 
				
			
			@ -1365,8 +1367,9 @@ static int kvm_put_mp_state(CPUX86State *env)
 | 
			
		|||
    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int kvm_get_mp_state(CPUX86State *env)
 | 
			
		||||
static int kvm_get_mp_state(X86CPU *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUX86State *env = &cpu->env;
 | 
			
		||||
    struct kvm_mp_state mp_state;
 | 
			
		||||
    int ret;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -1552,9 +1555,10 @@ static int kvm_get_debugregs(CPUX86State *env)
 | 
			
		|||
 | 
			
		||||
int kvm_arch_put_registers(CPUX86State *env, int level)
 | 
			
		||||
{
 | 
			
		||||
    CPUState *cpu = ENV_GET_CPU(env);
 | 
			
		||||
    int ret;
 | 
			
		||||
 | 
			
		||||
    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
 | 
			
		||||
    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
 | 
			
		||||
 | 
			
		||||
    ret = kvm_getput_regs(env, 1);
 | 
			
		||||
    if (ret < 0) {
 | 
			
		||||
| 
						 | 
				
			
			@ -1609,9 +1613,10 @@ int kvm_arch_put_registers(CPUX86State *env, int level)
 | 
			
		|||
 | 
			
		||||
int kvm_arch_get_registers(CPUX86State *env)
 | 
			
		||||
{
 | 
			
		||||
    X86CPU *cpu = x86_env_get_cpu(env);
 | 
			
		||||
    int ret;
 | 
			
		||||
 | 
			
		||||
    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
 | 
			
		||||
    assert(cpu_is_stopped(CPU(cpu)) || qemu_cpu_is_self(CPU(cpu)));
 | 
			
		||||
 | 
			
		||||
    ret = kvm_getput_regs(env, 0);
 | 
			
		||||
    if (ret < 0) {
 | 
			
		||||
| 
						 | 
				
			
			@ -1633,7 +1638,7 @@ int kvm_arch_get_registers(CPUX86State *env)
 | 
			
		|||
    if (ret < 0) {
 | 
			
		||||
        return ret;
 | 
			
		||||
    }
 | 
			
		||||
    ret = kvm_get_mp_state(env);
 | 
			
		||||
    ret = kvm_get_mp_state(cpu);
 | 
			
		||||
    if (ret < 0) {
 | 
			
		||||
        return ret;
 | 
			
		||||
    }
 | 
			
		||||
| 
						 | 
				
			
			@ -1781,8 +1786,10 @@ int kvm_arch_process_async_events(CPUX86State *env)
 | 
			
		|||
    return env->halted;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int kvm_handle_halt(CPUX86State *env)
 | 
			
		||||
static int kvm_handle_halt(X86CPU *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUX86State *env = &cpu->env;
 | 
			
		||||
 | 
			
		||||
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
 | 
			
		||||
          (env->eflags & IF_MASK)) &&
 | 
			
		||||
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
 | 
			
		||||
| 
						 | 
				
			
			@ -1996,13 +2003,14 @@ static bool host_supports_vmx(void)
 | 
			
		|||
 | 
			
		||||
int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
 | 
			
		||||
{
 | 
			
		||||
    X86CPU *cpu = x86_env_get_cpu(env);
 | 
			
		||||
    uint64_t code;
 | 
			
		||||
    int ret;
 | 
			
		||||
 | 
			
		||||
    switch (run->exit_reason) {
 | 
			
		||||
    case KVM_EXIT_HLT:
 | 
			
		||||
        DPRINTF("handle_hlt\n");
 | 
			
		||||
        ret = kvm_handle_halt(env);
 | 
			
		||||
        ret = kvm_handle_halt(cpu);
 | 
			
		||||
        break;
 | 
			
		||||
    case KVM_EXIT_SET_TPR:
 | 
			
		||||
        ret = 0;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -253,8 +253,10 @@ static inline void cpu_get_tb_cpu_state(CPULM32State *env, target_ulong *pc,
 | 
			
		|||
    *flags = 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline bool cpu_has_work(CPULM32State *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPULM32State *env = &LM32_CPU(cpu)->env;
 | 
			
		||||
 | 
			
		||||
    return env->interrupt_request & CPU_INTERRUPT_HARD;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -257,8 +257,10 @@ static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
 | 
			
		|||
            | ((env->macsr >> 4) & 0xf);        /* Bits 0-3 */
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline bool cpu_has_work(CPUM68KState *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUM68KState *env = &M68K_CPU(cpu)->env;
 | 
			
		||||
 | 
			
		||||
    return env->interrupt_request & CPU_INTERRUPT_HARD;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -374,8 +374,10 @@ void cpu_unassigned_access(CPUMBState *env1, hwaddr addr,
 | 
			
		|||
                           int is_write, int is_exec, int is_asi, int size);
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
static inline bool cpu_has_work(CPUMBState *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUMBState *env = &MICROBLAZE_CPU(cpu)->env;
 | 
			
		||||
 | 
			
		||||
    return env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -710,16 +710,17 @@ static inline int mips_vpe_active(CPUMIPSState *env)
 | 
			
		|||
    return active;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline int cpu_has_work(CPUMIPSState *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    int has_work = 0;
 | 
			
		||||
    CPUMIPSState *env = &MIPS_CPU(cpu)->env;
 | 
			
		||||
    bool has_work = false;
 | 
			
		||||
 | 
			
		||||
    /* It is implementation dependent if non-enabled interrupts
 | 
			
		||||
       wake-up the CPU, however most of the implementations only
 | 
			
		||||
       check for interrupts that can be taken. */
 | 
			
		||||
    if ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
 | 
			
		||||
        cpu_mips_hw_interrupts_pending(env)) {
 | 
			
		||||
        has_work = 1;
 | 
			
		||||
        has_work = true;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    /* MIPS-MT has the ability to halt the CPU.  */
 | 
			
		||||
| 
						 | 
				
			
			@ -727,11 +728,11 @@ static inline int cpu_has_work(CPUMIPSState *env)
 | 
			
		|||
        /* The QEMU model will issue an _WAKE request whenever the CPUs
 | 
			
		||||
           should be woken up.  */
 | 
			
		||||
        if (env->interrupt_request & CPU_INTERRUPT_WAKE) {
 | 
			
		||||
            has_work = 1;
 | 
			
		||||
            has_work = true;
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        if (!mips_vpe_active(env)) {
 | 
			
		||||
            has_work = 0;
 | 
			
		||||
            has_work = false;
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
    return has_work;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -437,8 +437,10 @@ static inline int cpu_mmu_index(CPUOpenRISCState *env)
 | 
			
		|||
}
 | 
			
		||||
 | 
			
		||||
#define CPU_INTERRUPT_TIMER   CPU_INTERRUPT_TGT_INT_0
 | 
			
		||||
static inline bool cpu_has_work(CPUOpenRISCState *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUOpenRISCState *env = &OPENRISC_CPU(cpu)->env;
 | 
			
		||||
 | 
			
		||||
    return env->interrupt_request & (CPU_INTERRUPT_HARD |
 | 
			
		||||
                                     CPU_INTERRUPT_TIMER);
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2222,10 +2222,12 @@ static inline bool msr_is_64bit(CPUPPCState *env, target_ulong msr)
 | 
			
		|||
    return msr & (1ULL << MSR_SF);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
extern void (*cpu_ppc_hypercall)(CPUPPCState *);
 | 
			
		||||
extern void (*cpu_ppc_hypercall)(PowerPCCPU *);
 | 
			
		||||
 | 
			
		||||
static inline bool cpu_has_work(CPUPPCState *env)
 | 
			
		||||
static inline bool cpu_has_work(CPUState *cpu)
 | 
			
		||||
{
 | 
			
		||||
    CPUPPCState *env = &POWERPC_CPU(cpu)->env;
 | 
			
		||||
 | 
			
		||||
    return msr_ee && (env->interrupt_request & CPU_INTERRUPT_HARD);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -33,7 +33,7 @@
 | 
			
		|||
/*****************************************************************************/
 | 
			
		||||
/* PowerPC Hypercall emulation */
 | 
			
		||||
 | 
			
		||||
void (*cpu_ppc_hypercall)(CPUPPCState *);
 | 
			
		||||
void (*cpu_ppc_hypercall)(PowerPCCPU *);
 | 
			
		||||
 | 
			
		||||
/*****************************************************************************/
 | 
			
		||||
/* Exception processing */
 | 
			
		||||
| 
						 | 
				
			
			@ -63,8 +63,9 @@ static inline void dump_syscall(CPUPPCState *env)
 | 
			
		|||
/* Note that this function should be greatly optimized
 | 
			
		||||
 * when called with a constant excp, from ppc_hw_interrupt
 | 
			
		||||
 */
 | 
			
		||||
static inline void powerpc_excp(CPUPPCState *env, int excp_model, int excp)
 | 
			
		||||
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
 | 
			
		||||
{
 | 
			
		||||
    CPUPPCState *env = &cpu->env;
 | 
			
		||||
    target_ulong msr, new_msr, vector;
 | 
			
		||||
    int srr0, srr1, asrr0, asrr1;
 | 
			
		||||
    int lpes0, lpes1, lev;
 | 
			
		||||
| 
						 | 
				
			
			@ -238,7 +239,7 @@ static inline void powerpc_excp(CPUPPCState *env, int excp_model, int excp)
 | 
			
		|||
        dump_syscall(env);
 | 
			
		||||
        lev = env->error_code;
 | 
			
		||||
        if ((lev == 1) && cpu_ppc_hypercall) {
 | 
			
		||||
            cpu_ppc_hypercall(env);
 | 
			
		||||
            cpu_ppc_hypercall(cpu);
 | 
			
		||||
            return;
 | 
			
		||||
        }
 | 
			
		||||
        if (lev == 1 || (lpes0 == 0 && lpes1 == 0)) {
 | 
			
		||||
| 
						 | 
				
			
			@ -643,11 +644,14 @@ static inline void powerpc_excp(CPUPPCState *env, int excp_model, int excp)
 | 
			
		|||
 | 
			
		||||
void do_interrupt(CPUPPCState *env)
 | 
			
		||||
{
 | 
			
		||||
    powerpc_excp(env, env->excp_model, env->exception_index);
 | 
			
		||||
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
 | 
			
		||||
 | 
			
		||||
    powerpc_excp(cpu, env->excp_model, env->exception_index);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void ppc_hw_interrupt(CPUPPCState *env)
 | 
			
		||||
{
 | 
			
		||||
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
 | 
			
		||||
    int hdice;
 | 
			
		||||
 | 
			
		||||
#if 0
 | 
			
		||||
| 
						 | 
				
			
			@ -658,20 +662,20 @@ void ppc_hw_interrupt(CPUPPCState *env)
 | 
			
		|||
    /* External reset */
 | 
			
		||||
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
 | 
			
		||||
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
 | 
			
		||||
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_RESET);
 | 
			
		||||
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
 | 
			
		||||
        return;
 | 
			
		||||
    }
 | 
			
		||||
    /* Machine check exception */
 | 
			
		||||
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
 | 
			
		||||
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
 | 
			
		||||
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_MCHECK);
 | 
			
		||||
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
 | 
			
		||||
        return;
 | 
			
		||||
    }
 | 
			
		||||
#if 0 /* TODO */
 | 
			
		||||
    /* External debug exception */
 | 
			
		||||
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
 | 
			
		||||
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
 | 
			
		||||
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_DEBUG);
 | 
			
		||||
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
 | 
			
		||||
        return;
 | 
			
		||||
    }
 | 
			
		||||
#endif
 | 
			
		||||
| 
						 | 
				
			
			@ -685,7 +689,7 @@ void ppc_hw_interrupt(CPUPPCState *env)
 | 
			
		|||
        /* Hypervisor decrementer exception */
 | 
			
		||||
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
 | 
			
		||||
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
 | 
			
		||||
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_HDECR);
 | 
			
		||||
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
 | 
			
		||||
            return;
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
| 
						 | 
				
			
@@ -698,7 +702,7 @@ void ppc_hw_interrupt(CPUPPCState *env)
 #if 0
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT);
 #endif
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_CRITICAL);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
             return;
         }
     }
@@ -706,30 +710,30 @@ void ppc_hw_interrupt(CPUPPCState *env)
         /* Watchdog timer on embedded PowerPC */
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_WDT);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
             return;
         }
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORCI);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
             return;
         }
         /* Fixed interval timer on embedded PowerPC */
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_FIT);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
             return;
         }
         /* Programmable interval timer on embedded PowerPC */
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_PIT);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
             return;
         }
         /* Decrementer exception */
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_DECR);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
             return;
         }
         /* External interrupt */
@@ -740,23 +744,23 @@ void ppc_hw_interrupt(CPUPPCState *env)
 #if 0
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT);
 #endif
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_EXTERNAL);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
             return;
         }
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORI);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
             return;
         }
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_PERFM);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
             return;
         }
         /* Thermal interrupt */
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
             env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
-            powerpc_excp(env, env->excp_model, POWERPC_EXCP_THERM);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
             return;
         }
     }
@@ -73,9 +73,11 @@ static int cap_hior;
  */
 static QEMUTimer *idle_timer;
 
-static void kvm_kick_env(void *env)
+static void kvm_kick_cpu(void *opaque)
 {
-    qemu_cpu_kick(env);
+    PowerPCCPU *cpu = opaque;
+
+    qemu_cpu_kick(CPU(cpu));
 }
 
 int kvm_arch_init(KVMState *s)
@@ -375,6 +377,7 @@ static inline void kvm_fixup_page_sizes(CPUPPCState *env)
 
 int kvm_arch_init_vcpu(CPUPPCState *cenv)
 {
+    PowerPCCPU *cpu = ppc_env_get_cpu(cenv);
     int ret;
 
     /* Gather server mmu info from KVM and update the CPU state */
@@ -386,7 +389,7 @@ int kvm_arch_init_vcpu(CPUPPCState *cenv)
         return ret;
     }
 
-    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv);
+    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_cpu, cpu);
 
     /* Some targets support access to KVM's guest TLB. */
     switch (cenv->mmu_model) {
@@ -814,7 +817,8 @@ int kvm_arch_handle_exit(CPUPPCState *env, struct kvm_run *run)
 #ifdef CONFIG_PSERIES
     case KVM_EXIT_PAPR_HCALL:
         dprintf("handle PAPR hypercall\n");
-        run->papr_hcall.ret = spapr_hypercall(env, run->papr_hcall.nr,
+        run->papr_hcall.ret = spapr_hypercall(ppc_env_get_cpu(env),
+                                              run->papr_hcall.nr,
                                               run->papr_hcall.args);
         ret = 0;
         break;
@@ -977,8 +977,10 @@ static inline void cpu_inject_ext(CPUS390XState *env, uint32_t code, uint32_t pa
     cpu_interrupt(env, CPU_INTERRUPT_HARD);
 }
 
-static inline bool cpu_has_work(CPUS390XState *env)
+static inline bool cpu_has_work(CPUState *cpu)
 {
+    CPUS390XState *env = &S390_CPU(cpu)->env;
+
     return (env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->psw.mask & PSW_MASK_EXT);
 }
@@ -403,7 +403,7 @@ static int s390_cpu_restart(S390CPU *cpu)
 
     kvm_s390_interrupt(env, KVM_S390_RESTART, 0);
     s390_add_running_cpu(env);
-    qemu_cpu_kick(env);
+    qemu_cpu_kick(CPU(cpu));
     dprintf("DONE: SIGP cpu restart: %p\n", env);
     return 0;
 }
@@ -371,8 +371,10 @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
             | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 4 */
 }
 
-static inline bool cpu_has_work(CPUSH4State *env)
+static inline bool cpu_has_work(CPUState *cpu)
 {
+    CPUSH4State *env = &SUPERH_CPU(cpu)->env;
+
     return env->interrupt_request & CPU_INTERRUPT_HARD;
 }
 
@@ -764,8 +764,10 @@ static inline bool tb_am_enabled(int tb_flags)
 #endif
 }
 
-static inline bool cpu_has_work(CPUSPARCState *env1)
+static inline bool cpu_has_work(CPUState *cpu)
 {
+    CPUSPARCState *env1 = &SPARC_CPU(cpu)->env;
+
     return (env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_interrupts_enabled(env1);
 }
@@ -12,7 +12,7 @@
  * or (at your option) any later version.
  */
 
-#include "cpu-qom.h"
+#include "cpu.h"
 #include "qemu-common.h"
 
 static inline void set_feature(CPUUniCore32State *env, int feature)
@@ -181,8 +181,10 @@ void uc32_translate_init(void);
 void do_interrupt(CPUUniCore32State *);
 void switch_mode(CPUUniCore32State *, int);
 
-static inline bool cpu_has_work(CPUUniCore32State *env)
+static inline bool cpu_has_work(CPUState *cpu)
 {
+    CPUUniCore32State *env = &UNICORE32_CPU(cpu)->env;
+
     return env->interrupt_request &
         (CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
 }
@@ -501,8 +501,10 @@ static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
 #include "cpu-all.h"
 #include "exec-all.h"
 
-static inline int cpu_has_work(CPUXtensaState *env)
+static inline int cpu_has_work(CPUState *cpu)
 {
+    CPUXtensaState *env = &XTENSA_CPU(cpu)->env;
+
     return env->pending_irq_level;
 }
 