/*
 *  x86 memory access helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

| #include "cpu.h"
 | |
| #include "helper.h"
 | |
| 
 | |
| #if !defined(CONFIG_USER_ONLY)
 | |
| #include "exec/softmmu_exec.h"
 | |
| #endif /* !defined(CONFIG_USER_ONLY) */
 | |
| 
 | |
/* Broken thread support: LOCK-prefixed instructions are serialized with a
   single global spinlock rather than with a per-address atomic operation. */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

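/*
 * CMPXCHG8B: compare the 64-bit value at [a0] with EDX:EAX.  If they are
 * equal, ECX:EBX is stored to [a0] and ZF is set; otherwise the old value
 * is loaded into EDX:EAX and ZF is cleared.  The memory operand is written
 * back in both cases, matching the unconditional write cycle performed by
 * the hardware instruction.
 */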
void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    d = cpu_ldq_data(env, a0);
    if (d == (((uint64_t)env->regs[R_EDX] << 32) |
              (uint32_t)env->regs[R_EAX])) {
        cpu_stq_data(env, a0, ((uint64_t)env->regs[R_ECX] << 32) |
                              (uint32_t)env->regs[R_EBX]);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        cpu_stq_data(env, a0, d);
        env->regs[R_EDX] = (uint32_t)(d >> 32);
        env->regs[R_EAX] = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
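/*
 * CMPXCHG16B: the 128-bit analogue of CMPXCHG8B.  The memory operand must
 * be 16-byte aligned, otherwise #GP is raised.  RDX:RAX is compared with
 * the two quadwords at [a0]; on a match RCX:RBX is stored and ZF is set,
 * otherwise the old value is loaded into RDX:RAX and ZF is cleared.
 */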
void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    eflags = cpu_cc_compute_all(env, CC_OP);
    d0 = cpu_ldq_data(env, a0);
    d1 = cpu_ldq_data(env, a0 + 8);
    if (d0 == env->regs[R_EAX] && d1 == env->regs[R_EDX]) {
        cpu_stq_data(env, a0, env->regs[R_EBX]);
        cpu_stq_data(env, a0 + 8, env->regs[R_ECX]);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        cpu_stq_data(env, a0, d0);
        cpu_stq_data(env, a0 + 8, d1);
        env->regs[R_EDX] = d1;
        env->regs[R_EAX] = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

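/*
 * BOUND: check a signed array index against a lower/upper bound pair stored
 * at [a0].  helper_boundw uses 16-bit bounds (at a0 and a0 + 2),
 * helper_boundl 32-bit bounds (at a0 and a0 + 4).  An out-of-range index
 * raises the BOUND range exceeded exception (#BR, vector 5).
 */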
void helper_boundw(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldsw_data(env, a0);
    high = cpu_ldsw_data(env, a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

void helper_boundl(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldl_data(env, a0);
    high = cpu_ldl_data(env, a0 + 4);
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

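/*
 * Instantiate the softmmu slow-path load/store helpers: each inclusion of
 * softmmu_template.h generates the out-of-line helpers for one access size,
 * selected by SHIFT (0 = 1 byte, 1 = 2 bytes, 2 = 4 bytes, 3 = 8 bytes).
 */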
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
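/*
 * tlb_fill is called whenever a guest memory access misses the TLB, either
 * from the slow-path helpers instantiated above or directly from C code.
 */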
/* Try to fill the TLB and raise a guest exception on failure.  If retaddr
 * is NULL, the function was called from C code (i.e. not from generated
 * code or from helper.c).
 */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = x86_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (ret) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception_err(env, cs->exception_index, env->error_code);
    }
}
#endif