ia64 host support (David Mosberger)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1360 c046a42c-6fe2-441c-8c8c-71466251a162
parent 7a674b1363
commit b8076a748d
				|  | @ -184,7 +184,9 @@ LDFLAGS+=-Wl,-T,$(SRC_PATH)/alpha.ld | |||
| endif | ||||
| 
 | ||||
| ifeq ($(ARCH),ia64) | ||||
| CFLAGS += -mno-sdata | ||||
| OP_CFLAGS=$(CFLAGS) | ||||
| LDFLAGS+=-Wl,-G0 -Wl,-T,$(SRC_PATH)/ia64.ld | ||||
| endif | ||||
| 
 | ||||
| ifeq ($(ARCH),arm) | ||||
|  | @ -382,6 +384,10 @@ vl.o: CFLAGS+=-p | |||
| VL_LDFLAGS+=-p | ||||
| endif | ||||
| 
 | ||||
| ifeq ($(ARCH),ia64) | ||||
| VL_LDFLAGS+=-Wl,-G0 -Wl,-T,$(SRC_PATH)/ia64.ld | ||||
| endif | ||||
| 
 | ||||
| $(QEMU_SYSTEM): $(VL_OBJS) libqemu.a | ||||
| 	$(CC) $(VL_LDFLAGS) -o $@ $^ $(LIBS) $(SDL_LIBS) $(COCOA_LIBS) $(VL_LIBS) | ||||
| 
 | ||||
cpu-exec.c (43 lines changed)
							|  | @ -573,6 +573,15 @@ int cpu_exec(CPUState *env1) | |||
|             ); | ||||
|     } | ||||
| } | ||||
| #elif defined(__ia64) | ||||
| 		struct fptr { | ||||
| 			void *ip; | ||||
| 			void *gp; | ||||
| 		} fp; | ||||
| 
 | ||||
| 		fp.ip = tc_ptr; | ||||
| 		fp.gp = code_gen_buffer + 2 * (1 << 20); | ||||
| 		(*(void (*)(void)) &fp)(); | ||||
| #else | ||||
|                 gen_func(); | ||||
| #endif | ||||
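The #elif branch above builds an ia64 function descriptor by hand: on this host an indirect call target is the pair (entry address, gp), and the generated code expects gp to point into the middle of the code generation buffer. A minimal sketch of the same calling trick, with illustrative names rather than the QEMU symbols:

    static void call_generated_code(void *entry, void *gp_value)
    {
        /* an ia64 function descriptor: entry point plus global pointer */
        struct example_fptr {
            void *ip;
            void *gp;
        } fp;

        fp.ip = entry;
        fp.gp = gp_value;
        /* Casting the descriptor's address to a function pointer makes the
           compiler emit the normal ia64 indirect-call sequence, which loads
           both ip and gp from the descriptor before branching. */
        (*(void (*)(void)) &fp)();
    }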
|  | @ -1118,6 +1127,40 @@ int cpu_signal_handler(int host_signum, struct siginfo *info, | |||
|                              &uc->uc_sigmask, puc); | ||||
| } | ||||
| 
 | ||||
| #elif defined(__ia64) | ||||
| 
 | ||||
| #ifndef __ISR_VALID | ||||
|   /* This ought to be in <bits/siginfo.h>... */ | ||||
| # define __ISR_VALID	1 | ||||
| # define si_flags	_sifields._sigfault._si_pad0 | ||||
| #endif | ||||
| 
 | ||||
| int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc) | ||||
| { | ||||
|     struct ucontext *uc = puc; | ||||
|     unsigned long ip; | ||||
|     int is_write = 0; | ||||
| 
 | ||||
|     ip = uc->uc_mcontext.sc_ip; | ||||
|     switch (host_signum) { | ||||
|       case SIGILL: | ||||
|       case SIGFPE: | ||||
|       case SIGSEGV: | ||||
|       case SIGBUS: | ||||
|       case SIGTRAP: | ||||
| 	  if (info->si_code && (info->si_flags & __ISR_VALID)) | ||||
| 	      /* ISR.W (write-access) is bit 33:  */ | ||||
| 	      is_write = (info->si_isr >> 33) & 1; | ||||
| 	  break; | ||||
| 
 | ||||
|       default: | ||||
| 	  break; | ||||
|     } | ||||
|     return handle_cpu_signal(ip, (unsigned long)info->si_addr, | ||||
|                              is_write, | ||||
|                              &uc->uc_sigmask, puc); | ||||
| } | ||||
| 
 | ||||
| #else | ||||
| 
 | ||||
| #error host CPU specific signal handler needed | ||||
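The new ia64 cpu_signal_handler() above classifies faults with the Interruption Status Register that Linux delivers in siginfo: bit 33 (ISR.W) is set for write accesses. A hedged restatement of just that check, assuming the same Linux/ia64 siginfo fields and the si_flags/__ISR_VALID fallback defined above:

    static int ia64_fault_is_write(const struct siginfo *info)
    {
        /* si_isr and si_flags are Linux/ia64-specific siginfo fields */
        if (info->si_code && (info->si_flags & __ISR_VALID))
            return (info->si_isr >> 33) & 1;    /* ISR.W, the write-access bit */
        return 0;
    }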
disas.c (9 lines changed)
							|  | @ -143,7 +143,8 @@ void target_disas(FILE *out, target_ulong code, target_ulong size, int flags) | |||
| #elif defined(TARGET_PPC) | ||||
|     print_insn = print_insn_ppc; | ||||
| #else | ||||
|     fprintf(out, "Asm output not supported on this arch\n"); | ||||
|     fprintf(out, "0x" TARGET_FMT_lx | ||||
| 	    ": Asm output not supported on this arch\n", code); | ||||
|     return; | ||||
| #endif | ||||
| 
 | ||||
|  | @ -202,7 +203,8 @@ void disas(FILE *out, void *code, unsigned long size) | |||
| #elif defined(__arm__)  | ||||
|     print_insn = print_insn_arm; | ||||
| #else | ||||
|     fprintf(out, "Asm output not supported on this arch\n"); | ||||
|     fprintf(out, "0x%lx: Asm output not supported on this arch\n", | ||||
| 	    (long) code); | ||||
|     return; | ||||
| #endif | ||||
|     for (pc = (unsigned long)code; pc < (unsigned long)code + size; pc += count) { | ||||
|  | @ -311,7 +313,8 @@ void monitor_disas(target_ulong pc, int nb_insn, int is_physical, int flags) | |||
| #elif defined(TARGET_PPC) | ||||
|     print_insn = print_insn_ppc; | ||||
| #else | ||||
|     term_printf("Asm output not supported on this arch\n"); | ||||
|     term_printf("0x" TARGET_FMT_lx | ||||
| 		": Asm output not supported on this arch\n", pc); | ||||
|     return; | ||||
| #endif | ||||
| 
 | ||||
|  | @ -29,7 +29,7 @@ typedef unsigned char uint8_t; | |||
| typedef unsigned short uint16_t; | ||||
| typedef unsigned int uint32_t; | ||||
| /* XXX may be done for all 64 bits targets ? */ | ||||
| #if defined (__x86_64__) | ||||
| #if defined (__x86_64__) || defined(__ia64) | ||||
| typedef unsigned long uint64_t; | ||||
| #else | ||||
| typedef unsigned long long uint64_t; | ||||
|  | @ -38,7 +38,7 @@ typedef unsigned long long uint64_t; | |||
| typedef signed char int8_t; | ||||
| typedef signed short int16_t; | ||||
| typedef signed int int32_t; | ||||
| #if defined (__x86_64__) | ||||
| #if defined (__x86_64__) || defined(__ia64) | ||||
| typedef signed long int64_t; | ||||
| #else | ||||
| typedef signed long long int64_t; | ||||
|  | @ -148,10 +148,10 @@ extern int printf(const char *, ...); | |||
| #define AREG4 "%d5" | ||||
| #endif | ||||
| #ifdef __ia64__ | ||||
| #define AREG0 "r27" | ||||
| #define AREG1 "r24" | ||||
| #define AREG2 "r25" | ||||
| #define AREG3 "r26" | ||||
| #define AREG0 "r7" | ||||
| #define AREG1 "r4" | ||||
| #define AREG2 "r5" | ||||
| #define AREG3 "r6" | ||||
| #endif | ||||
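The ia64 AREG assignments above move to r4-r7; these are the architecture's preserved (callee-saved) general registers, which is what dyngen needs for state pointers that must survive calls made from generated code. An illustrative, hypothetical use of such a macro as a global register variable:

    #define EXAMPLE_AREG0 "r7"

    /* hypothetical: pin a pointer in a preserved register for this unit,
       the way dyngen-based ops keep the CPU state pointer in AREG0 */
    register unsigned long example_env_ptr asm(EXAMPLE_AREG0);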
| 
 | ||||
| /* force GCC to generate only one epilog at the end of the function */ | ||||
|  | @ -224,6 +224,8 @@ extern int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3; | |||
| #endif | ||||
| #ifdef __ia64__ | ||||
| #define EXIT_TB() asm volatile ("br.ret.sptk.many b0;;") | ||||
| #define GOTO_LABEL_PARAM(n) asm volatile ("br.sptk.many " \ | ||||
| 					  ASM_NAME(__op_gen_label) #n) | ||||
| #endif | ||||
| #ifdef __sparc__ | ||||
| #define EXIT_TB() asm volatile ("jmpl %i0 + 8, %g0\n" \ | ||||
dyngen.c (190 lines changed)
							|  | @ -1203,6 +1203,48 @@ void get_reloc_expr(char *name, int name_size, const char *sym_name) | |||
|     } | ||||
| } | ||||
| 
 | ||||
| #ifdef HOST_IA64 | ||||
| 
 | ||||
| #define PLT_ENTRY_SIZE	16	/* 1 bundle containing "brl" */ | ||||
| 
 | ||||
| struct plt_entry { | ||||
|     struct plt_entry *next; | ||||
|     const char *name; | ||||
|     unsigned long addend; | ||||
| } *plt_list; | ||||
| 
 | ||||
| static int | ||||
| get_plt_index (const char *name, unsigned long addend) | ||||
| { | ||||
|     struct plt_entry *plt, *prev= NULL; | ||||
|     int index = 0; | ||||
| 
 | ||||
|     /* see if we already have an entry for this target: */ | ||||
|     for (plt = plt_list; plt; ++index, prev = plt, plt = plt->next) | ||||
| 	if (strcmp(plt->name, name) == 0 && plt->addend == addend) | ||||
| 	    return index; | ||||
| 
 | ||||
|     /* nope; create a new PLT entry: */ | ||||
| 
 | ||||
|     plt = malloc(sizeof(*plt)); | ||||
|     if (!plt) { | ||||
| 	perror("malloc"); | ||||
| 	exit(1); | ||||
|     } | ||||
|     memset(plt, 0, sizeof(*plt)); | ||||
|     plt->name = strdup(name); | ||||
|     plt->addend = addend; | ||||
| 
 | ||||
|     /* append to plt-list: */ | ||||
|     if (prev) | ||||
| 	prev->next = plt; | ||||
|     else | ||||
| 	plt_list = plt; | ||||
|     return index; | ||||
| } | ||||
| 
 | ||||
| #endif | ||||
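get_plt_index() above hands out one stub slot per (symbol, addend) pair; stubs are needed because a br.call patched through a PCREL21B relocation only encodes a signed 21-bit bundle displacement, roughly +/-16 MB from the call site. A small sketch of that reachability test, assuming displacements are counted in 16-byte bundles as in the dyngen.h patch helpers:

    #include <stdint.h>

    /* non-zero if `target` is directly reachable from the bundle holding
       `call_site` with a 21-bit branch displacement */
    static int pcrel21b_reachable(uint64_t call_site, uint64_t target)
    {
        int64_t disp = ((int64_t) target
                        - (int64_t) (call_site & ~(uint64_t) 0xf)) >> 4;
        return disp >= -(1 << 20) && disp < (1 << 20);
    }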
| 
 | ||||
| #ifdef HOST_ARM | ||||
| 
 | ||||
| int arm_emit_ldr_info(const char *name, unsigned long start_offset, | ||||
|  | @ -1392,7 +1434,7 @@ void gen_code(const char *name, host_ulong offset, host_ulong size, | |||
|         /* 08 00 84 00 */ | ||||
|         if (get32((uint32_t *)p) != 0x00840008) | ||||
|             error("br.ret.sptk.many b0;; expected at the end of %s", name); | ||||
|         copy_size = p - p_start; | ||||
| 	copy_size = p_end - p_start; | ||||
|     } | ||||
| #elif defined(HOST_SPARC) | ||||
|     { | ||||
|  | @ -1529,7 +1571,11 @@ void gen_code(const char *name, host_ulong offset, host_ulong size, | |||
|             } | ||||
|             fprintf(outfile, ";\n"); | ||||
|         } | ||||
| #if defined(HOST_IA64) | ||||
|         fprintf(outfile, "    extern char %s;\n", name); | ||||
| #else | ||||
|         fprintf(outfile, "    extern void %s();\n", name); | ||||
| #endif | ||||
| 
 | ||||
|         for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { | ||||
|             host_ulong offset = get_rel_offset(rel); | ||||
|  | @ -1550,9 +1596,18 @@ void gen_code(const char *name, host_ulong offset, host_ulong size, | |||
| 			continue; | ||||
| 		    } | ||||
| #endif | ||||
| #ifdef __APPLE__ | ||||
| #if defined(__APPLE__) | ||||
| /* set __attribute((unused)) on darwin because we want to avoid a warning when we don't use the symbol */ | ||||
|                     fprintf(outfile, "extern char %s __attribute__((unused));\n", sym_name); | ||||
| #elif defined(HOST_IA64) | ||||
| 			if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B) | ||||
| 				/*
 | ||||
| 				 * PCREL21 br.call targets generally | ||||
| 				 * are out of range and need to go | ||||
| 				 * through an "import stub". | ||||
| 				 */ | ||||
| 				fprintf(outfile, "    extern char %s;\n", | ||||
| 					sym_name); | ||||
| #else | ||||
|                     fprintf(outfile, "extern char %s;\n", sym_name); | ||||
| #endif | ||||
|  | @ -1964,25 +2019,78 @@ void gen_code(const char *name, host_ulong offset, host_ulong size, | |||
|             } | ||||
| #elif defined(HOST_IA64) | ||||
|             { | ||||
| 		unsigned long sym_idx; | ||||
| 		long code_offset; | ||||
|                 char name[256]; | ||||
|                 int type; | ||||
|                 int addend; | ||||
|                 long addend; | ||||
| 
 | ||||
|                 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { | ||||
|                     if (rel->r_offset >= start_offset && rel->r_offset < start_offset + copy_size) { | ||||
|                         sym_name = strtab + symtab[ELF64_R_SYM(rel->r_info)].st_name; | ||||
| 		    sym_idx = ELF64_R_SYM(rel->r_info); | ||||
|                     if (rel->r_offset < start_offset | ||||
| 			|| rel->r_offset >= start_offset + copy_size) | ||||
| 			continue; | ||||
| 		    sym_name = (strtab + symtab[sym_idx].st_name); | ||||
| 		    if (strstart(sym_name, "__op_jmp", &p)) { | ||||
| 			int n; | ||||
| 			n = strtol(p, NULL, 10); | ||||
| 			/* __op_jmp relocations are done at
 | ||||
| 			   runtime to do translated block | ||||
| 			   chaining: the offset of the instruction | ||||
| 			   needs to be stored */ | ||||
| 			fprintf(outfile, "    jmp_offsets[%d] =" | ||||
| 				"%ld + (gen_code_ptr - gen_code_buf);\n", | ||||
| 				n, rel->r_offset - start_offset); | ||||
| 			continue; | ||||
| 		    } | ||||
| 		    get_reloc_expr(name, sizeof(name), sym_name); | ||||
| 		    type = ELF64_R_TYPE(rel->r_info); | ||||
| 		    addend = rel->r_addend; | ||||
| 		    code_offset = rel->r_offset - start_offset; | ||||
| 		    switch(type) { | ||||
| 		      case R_IA64_IMM64: | ||||
| 			  fprintf(outfile, | ||||
| 				  "    ia64_imm64(gen_code_ptr + %ld, " | ||||
| 				  "%s + %ld);\n", | ||||
| 				  code_offset, name, addend); | ||||
| 			  break; | ||||
| 		      case R_IA64_LTOFF22X: | ||||
| 		      case R_IA64_LTOFF22: | ||||
| 			    error("must implement R_IA64_LTOFF22 relocation"); | ||||
| 			  fprintf(outfile, "    IA64_LTOFF(gen_code_ptr + %ld," | ||||
| 				  " %s + %ld, %d);\n", | ||||
| 				  code_offset, name, addend, | ||||
| 				  (type == R_IA64_LTOFF22X)); | ||||
| 			  break; | ||||
| 		      case R_IA64_LDXMOV: | ||||
| 			  fprintf(outfile, | ||||
| 				  "    ia64_ldxmov(gen_code_ptr + %ld," | ||||
| 				  " %s + %ld);\n", code_offset, name, addend); | ||||
| 			  break; | ||||
| 
 | ||||
| 		      case R_IA64_PCREL21B: | ||||
| 			    error("must implement R_IA64_PCREL21B relocation"); | ||||
| 			  if (strstart(sym_name, "__op_gen_label", NULL)) { | ||||
| 			      fprintf(outfile, | ||||
| 				      "    ia64_imm21b(gen_code_ptr + %ld," | ||||
| 				      " (long) (%s + %ld -\n\t\t" | ||||
| 				      "((long) gen_code_ptr + %ld)) >> 4);\n", | ||||
| 				      code_offset, name, addend, | ||||
| 				      code_offset & ~0xfUL); | ||||
| 			  } else { | ||||
| 			      fprintf(outfile, | ||||
| 				      "    IA64_PLT(gen_code_ptr + %ld, " | ||||
| 				      "%d);\t/* %s + %ld */\n", | ||||
| 				      code_offset, | ||||
| 				      get_plt_index(sym_name, addend), | ||||
| 				      sym_name, addend); | ||||
| 			  } | ||||
| 			  break; | ||||
| 		      default: | ||||
|                             error("unsupported ia64 relocation (%d)", type); | ||||
|                         } | ||||
| 			  error("unsupported ia64 relocation (0x%x)", | ||||
| 				type); | ||||
| 		    } | ||||
|                 } | ||||
| 		fprintf(outfile, "    ia64_nop_b(gen_code_ptr + %d);\n", | ||||
| 			copy_size - 16 + 2); | ||||
|             } | ||||
| #elif defined(HOST_SPARC) | ||||
|             { | ||||
|  | @ -2236,6 +2344,63 @@ fprintf(outfile, | |||
| "    LDREntry *arm_ldr_ptr = arm_ldr_table;\n" | ||||
| "    uint32_t *arm_data_ptr = arm_data_table;\n"); | ||||
| #endif | ||||
| #ifdef HOST_IA64 | ||||
|     { | ||||
| 	long addend, not_first = 0; | ||||
| 	unsigned long sym_idx; | ||||
| 	int index, max_index; | ||||
| 	const char *sym_name; | ||||
| 	EXE_RELOC *rel; | ||||
| 
 | ||||
| 	max_index = -1; | ||||
| 	for (i = 0, rel = relocs;i < nb_relocs; i++, rel++) { | ||||
| 	    sym_idx = ELF64_R_SYM(rel->r_info); | ||||
| 	    sym_name = (strtab + symtab[sym_idx].st_name); | ||||
| 	    if (strstart(sym_name, "__op_gen_label", NULL)) | ||||
| 		continue; | ||||
| 	    if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B) | ||||
| 		continue; | ||||
| 
 | ||||
| 	    addend = rel->r_addend; | ||||
| 	    index = get_plt_index(sym_name, addend); | ||||
| 	    if (index <= max_index) | ||||
| 		continue; | ||||
| 	    max_index = index; | ||||
| 	    fprintf(outfile, "    extern void %s(void);\n", sym_name); | ||||
| 	} | ||||
| 
 | ||||
| 	fprintf(outfile, | ||||
| 		"    struct ia64_fixup *plt_fixes = NULL, " | ||||
| 		"*ltoff_fixes = NULL;\n" | ||||
| 		"    static long plt_target[] = {\n\t"); | ||||
| 
 | ||||
| 	max_index = -1; | ||||
| 	for (i = 0, rel = relocs;i < nb_relocs; i++, rel++) { | ||||
| 	    sym_idx = ELF64_R_SYM(rel->r_info); | ||||
| 	    sym_name = (strtab + symtab[sym_idx].st_name); | ||||
| 	    if (strstart(sym_name, "__op_gen_label", NULL)) | ||||
| 		continue; | ||||
| 	    if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B) | ||||
| 		continue; | ||||
| 
 | ||||
| 	    addend = rel->r_addend; | ||||
| 	    index = get_plt_index(sym_name, addend); | ||||
| 	    if (index <= max_index) | ||||
| 		continue; | ||||
| 	    max_index = index; | ||||
| 
 | ||||
| 	    if (not_first) | ||||
| 		fprintf(outfile, ",\n\t"); | ||||
| 	    not_first = 1; | ||||
| 	    if (addend) | ||||
| 		fprintf(outfile, "(long) &%s + %ld", sym_name, addend); | ||||
| 	    else | ||||
| 		fprintf(outfile, "(long) &%s", sym_name); | ||||
| 	} | ||||
| 	fprintf(outfile, "\n    };\n" | ||||
| 	    "    unsigned int plt_offset[%u] = { 0 };\n", max_index + 1); | ||||
|     } | ||||
| #endif | ||||
| 
 | ||||
| fprintf(outfile, | ||||
| "\n" | ||||
|  | @ -2298,6 +2463,13 @@ fprintf(outfile, | |||
| "    }\n" | ||||
| " the_end:\n" | ||||
| ); | ||||
| #ifdef HOST_IA64 | ||||
|     fprintf(outfile, | ||||
| 	    "    ia64_apply_fixes(&gen_code_ptr, ltoff_fixes, " | ||||
| 	    "(uint64_t) code_gen_buffer + 2*(1<<20), plt_fixes,\n\t\t\t" | ||||
| 	    "sizeof(plt_target)/sizeof(plt_target[0]),\n\t\t\t" | ||||
| 	    "plt_target, plt_offset);\n"); | ||||
| #endif | ||||
| 
 | ||||
| /* generate some code patching */  | ||||
| #ifdef HOST_ARM | ||||
dyngen.h (220 lines changed)
							|  | @ -43,6 +43,11 @@ static inline void flush_icache_range(unsigned long start, unsigned long stop) | |||
| #ifdef __ia64__ | ||||
| static inline void flush_icache_range(unsigned long start, unsigned long stop) | ||||
| { | ||||
|     while (start < stop) { | ||||
| 	asm volatile ("fc %0" :: "r"(start)); | ||||
| 	start += 32; | ||||
|     } | ||||
|     asm volatile (";;sync.i;;srlz.i;;"); | ||||
| } | ||||
| #endif | ||||
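flush_icache_range() above issues fc on each 32-byte line and then serializes with sync.i/srlz.i; ia64 does not keep the instruction cache coherent with stores, so newly emitted code must be flushed before it is executed. A hedged usage sketch with an illustrative buffer name:

    #include <string.h>

    extern unsigned char example_code_buf[];     /* hypothetical code buffer */

    static void example_emit_and_flush(const void *tmpl, unsigned long size)
    {
        memcpy(example_code_buf, tmpl, size);    /* store the new code */
        flush_icache_range((unsigned long) example_code_buf,
                           (unsigned long) example_code_buf + size);
        /* only now is it safe to branch into example_code_buf */
    }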
| 
 | ||||
|  | @ -204,3 +209,218 @@ static uint8_t *arm_flush_ldr(uint8_t *gen_code_ptr, | |||
| } | ||||
| 
 | ||||
| #endif /* __arm__ */ | ||||
| 
 | ||||
| #ifdef __ia64 | ||||
| 
 | ||||
| 
 | ||||
| /* Patch instruction with "val" where "mask" has 1 bits. */ | ||||
| static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val) | ||||
| { | ||||
|     uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16); | ||||
| #   define insn_mask ((1UL << 41) - 1) | ||||
|     unsigned long shift; | ||||
| 
 | ||||
|     b0 = b[0]; b1 = b[1]; | ||||
|     shift = 5 + 41 * (insn_addr % 16); /* 5 template, 3 x 41-bit insns */ | ||||
|     if (shift >= 64) { | ||||
| 	m1 = mask << (shift - 64); | ||||
| 	v1 = val << (shift - 64); | ||||
|     } else { | ||||
| 	m0 = mask << shift; m1 = mask >> (64 - shift); | ||||
| 	v0 = val  << shift; v1 = val >> (64 - shift); | ||||
| 	b[0] = (b0 & ~m0) | (v0 & m0); | ||||
|     } | ||||
|     b[1] = (b1 & ~m1) | (v1 & m1); | ||||
| } | ||||
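ia64_patch() above treats its first argument as a bundle address with the slot number in the low bits: a bundle is 128 bits, a 5-bit template followed by three 41-bit slots, so the slots start at bit offsets 5, 46 and 87. A hedged usage sketch against a local, zeroed bundle:

    #include <stdint.h>

    static void example_patch_slot1(void)
    {
        uint64_t bundle[2] __attribute__((aligned(16))) = { 0, 0 };

        /* write the 13-bit value 0x1234 into bits 13..25 of slot 1
           (slot 1 starts at bit offset 5 + 41*1 = 46 in the bundle) */
        ia64_patch((uint64_t) bundle + 1,        /* "+ 1" selects slot 1 */
                   (uint64_t) 0x1fff << 13,      /* mask of bits to replace */
                   (uint64_t) 0x1234 << 13);     /* value, in slot coordinates */
    }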
| 
 | ||||
| static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val) | ||||
| { | ||||
| 	ia64_patch(insn_addr, | ||||
| 		   0x011ffffe000UL, | ||||
| 		   (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */ | ||||
| 		    | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */)); | ||||
| 	ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18); | ||||
| } | ||||
| 
 | ||||
| static inline void ia64_imm64 (void *insn, uint64_t val) | ||||
| { | ||||
|     /* Ignore the slot number of the relocation; GCC and Intel
 | ||||
|        toolchains differed for some time on whether IMM64 relocs are | ||||
|        against slot 1 (Intel) or slot 2 (GCC).  */ | ||||
|     uint64_t insn_addr = (uint64_t) insn & ~3UL; | ||||
| 
 | ||||
|     ia64_patch(insn_addr + 2, | ||||
| 	       0x01fffefe000UL, | ||||
| 	       (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */ | ||||
| 		| ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */ | ||||
| 		| ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */ | ||||
| 		| ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */ | ||||
| 		| ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */) | ||||
| 	    ); | ||||
|     ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22); | ||||
| } | ||||
| 
 | ||||
| static inline void ia64_imm60b (void *insn, uint64_t val) | ||||
| { | ||||
|     /* Ignore the slot number of the relocation; GCC and Intel
 | ||||
|        toolchains differed for some time on whether IMM64 relocs are | ||||
|        against slot 1 (Intel) or slot 2 (GCC).  */ | ||||
|     uint64_t insn_addr = (uint64_t) insn & ~3UL; | ||||
| 
 | ||||
|     if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) | ||||
| 	fprintf(stderr, "%s: value %ld out of IMM60 range\n", | ||||
| 		__FUNCTION__, (int64_t) val); | ||||
|     ia64_patch_imm60(insn_addr + 2, val); | ||||
| } | ||||
| 
 | ||||
| static inline void ia64_imm22 (void *insn, uint64_t val) | ||||
| { | ||||
|     if (val + (1 << 21) >= (1 << 22)) | ||||
| 	fprintf(stderr, "%s: value %li out of IMM22 range\n", | ||||
| 		__FUNCTION__, (int64_t)val); | ||||
|     ia64_patch((uint64_t) insn, 0x01fffcfe000UL, | ||||
| 	       (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */ | ||||
| 		| ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */ | ||||
| 		| ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */ | ||||
| 		| ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */)); | ||||
| } | ||||
| 
 | ||||
| /* Like ia64_imm22(), but also clear bits 20-21.  For addl, this has
 | ||||
|    the effect of turning "addl rX=imm22,rY" into "addl | ||||
|    rX=imm22,r0".  */ | ||||
| static inline void ia64_imm22_r0 (void *insn, uint64_t val) | ||||
| { | ||||
|     if (val + (1 << 21) >= (1 << 22)) | ||||
| 	fprintf(stderr, "%s: value %li out of IMM22 range\n", | ||||
| 		__FUNCTION__, (int64_t)val); | ||||
|     ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20), | ||||
| 	       (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */ | ||||
| 		| ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */ | ||||
| 		| ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */ | ||||
| 		| ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */)); | ||||
| } | ||||
| 
 | ||||
| static inline void ia64_imm21b (void *insn, uint64_t val) | ||||
| { | ||||
|     if (val + (1 << 20) >= (1 << 21)) | ||||
| 	fprintf(stderr, "%s: value %li out of IMM21b range\n", | ||||
| 		__FUNCTION__, (int64_t)val); | ||||
|     ia64_patch((uint64_t) insn, 0x11ffffe000UL, | ||||
| 	       (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */ | ||||
| 		| ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */)); | ||||
| } | ||||
| 
 | ||||
| static inline void ia64_nop_b (void *insn) | ||||
| { | ||||
|     ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37); | ||||
| } | ||||
| 
 | ||||
| static inline void ia64_ldxmov(void *insn, uint64_t val) | ||||
| { | ||||
|     if (val + (1 << 21) < (1 << 22)) | ||||
| 	ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37); | ||||
| } | ||||
| 
 | ||||
| static inline int ia64_patch_ltoff(void *insn, uint64_t val, | ||||
| 				   int relaxable) | ||||
| { | ||||
|     if (relaxable && (val + (1 << 21) < (1 << 22))) { | ||||
| 	ia64_imm22_r0(insn, val); | ||||
| 	return 0; | ||||
|     } | ||||
|     return 1; | ||||
| } | ||||
| 
 | ||||
| struct ia64_fixup { | ||||
|     struct ia64_fixup *next; | ||||
|     void *addr;			/* address that needs to be patched */ | ||||
|     long value; | ||||
| }; | ||||
| 
 | ||||
| #define IA64_PLT(insn, plt_index)			\ | ||||
| do {							\ | ||||
|     struct ia64_fixup *fixup = alloca(sizeof(*fixup));	\ | ||||
|     fixup->next = plt_fixes;				\ | ||||
|     plt_fixes = fixup;					\ | ||||
|     fixup->addr = (insn);				\ | ||||
|     fixup->value = (plt_index);				\ | ||||
|     plt_offset[(plt_index)] = 1;			\ | ||||
| } while (0) | ||||
| 
 | ||||
| #define IA64_LTOFF(insn, val, relaxable)			\ | ||||
| do {								\ | ||||
|     if (ia64_patch_ltoff(insn, val, relaxable)) {		\ | ||||
| 	struct ia64_fixup *fixup = alloca(sizeof(*fixup));	\ | ||||
| 	fixup->next = ltoff_fixes;				\ | ||||
| 	ltoff_fixes = fixup;					\ | ||||
| 	fixup->addr = (insn);					\ | ||||
| 	fixup->value = (val);					\ | ||||
|     }								\ | ||||
| } while (0) | ||||
| 
 | ||||
| static inline void ia64_apply_fixes (uint8_t **gen_code_pp, | ||||
| 				     struct ia64_fixup *ltoff_fixes, | ||||
| 				     uint64_t gp, | ||||
| 				     struct ia64_fixup *plt_fixes, | ||||
| 				     int num_plts, | ||||
| 				     unsigned long *plt_target, | ||||
| 				     unsigned int *plt_offset) | ||||
| { | ||||
|     static const uint8_t plt_bundle[] = { | ||||
| 	0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,	/* nop 0; movl r1=GP */ | ||||
| 	0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60, | ||||
| 
 | ||||
| 	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,	/* nop 0; brl IP */ | ||||
| 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0 | ||||
|     }; | ||||
|     uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start, *vp; | ||||
|     struct ia64_fixup *fixup; | ||||
|     unsigned int offset = 0; | ||||
|     struct fdesc { | ||||
| 	long ip; | ||||
| 	long gp; | ||||
|     } *fdesc; | ||||
|     int i; | ||||
| 
 | ||||
|     if (plt_fixes) { | ||||
| 	plt_start = gen_code_ptr; | ||||
| 
 | ||||
| 	for (i = 0; i < num_plts; ++i) { | ||||
| 	    if (plt_offset[i]) { | ||||
| 		plt_offset[i] = offset; | ||||
| 		offset += sizeof(plt_bundle); | ||||
| 
 | ||||
| 		fdesc = (struct fdesc *) plt_target[i]; | ||||
| 		memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle)); | ||||
| 		ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp); | ||||
| 		ia64_imm60b(gen_code_ptr + 0x12, | ||||
| 			    (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4); | ||||
| 		gen_code_ptr += sizeof(plt_bundle); | ||||
| 	    } | ||||
| 	} | ||||
| 
 | ||||
| 	for (fixup = plt_fixes; fixup; fixup = fixup->next) | ||||
| 	    ia64_imm21b(fixup->addr, | ||||
| 			((long) plt_start + plt_offset[fixup->value] | ||||
| 			 - ((long) fixup->addr & ~0xf)) >> 4); | ||||
|     } | ||||
| 
 | ||||
|     got_start = gen_code_ptr; | ||||
| 
 | ||||
|     /* First, create the GOT: */ | ||||
|     for (fixup = ltoff_fixes; fixup; fixup = fixup->next) { | ||||
| 	/* first check if we already have this value in the GOT: */ | ||||
| 	for (vp = got_start; vp < gen_code_ptr; ++vp) | ||||
| 	    if (*(uint64_t *) vp == fixup->value) | ||||
| 		break; | ||||
| 	if (vp == gen_code_ptr) { | ||||
| 	    /* Nope, we need to put the value in the GOT: */ | ||||
| 	    *(uint64_t *) vp = fixup->value; | ||||
| 	    gen_code_ptr += 8; | ||||
| 	} | ||||
| 	ia64_imm22(fixup->addr, (long) vp - gp); | ||||
|     } | ||||
|     *gen_code_pp = gen_code_ptr; | ||||
| } | ||||
| 
 | ||||
| #endif | ||||
exec-all.h (11 lines changed)
							|  | @ -126,6 +126,8 @@ int tlb_set_page(CPUState *env, target_ulong vaddr, | |||
| 
 | ||||
| #if defined(__alpha__) | ||||
| #define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024) | ||||
| #elif defined(__ia64) | ||||
| #define CODE_GEN_BUFFER_SIZE     (4 * 1024 * 1024)	/* range of addl */ | ||||
| #elif defined(__powerpc__) | ||||
| #define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024) | ||||
| #else | ||||
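The 4 MB buffer size above is tied to ia64 gp-relative addressing: an addl immediate is a signed 22 bits, so code can reach +/-2 MB around gp, and cpu_exec() points gp at code_gen_buffer + 2 MB. A quick arithmetic check of that assumption:

    #include <assert.h>

    #define EXAMPLE_BUFFER_SIZE (4 * 1024 * 1024)   /* mirrors the define above */

    static void example_check_addl_range(void)
    {
        long addl_min = -(1L << 21);                /* signed 22-bit immediate */
        long addl_max = (1L << 21) - 1;
        long gp_off   = EXAMPLE_BUFFER_SIZE / 2;    /* gp placed mid-buffer */

        assert(gp_off + addl_min <= 0);                        /* start reachable */
        assert(gp_off + addl_max >= EXAMPLE_BUFFER_SIZE - 1);  /* end reachable */
    }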
|  | @ -487,6 +489,15 @@ static inline int testandset (int *p) | |||
| } | ||||
| #endif | ||||
| 
 | ||||
| #ifdef __ia64 | ||||
| #include <ia64intrin.h> | ||||
| 
 | ||||
| static inline int testandset (int *p) | ||||
| { | ||||
|     return __sync_lock_test_and_set (p, 1); | ||||
| } | ||||
| #endif | ||||
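__sync_lock_test_and_set(p, 1) atomically stores 1 and returns the previous value, which is exactly what the spinlock helpers built on testandset() need. A hedged usage sketch, assuming a compiler that provides the __sync builtins (as <ia64intrin.h> did on Linux/ia64):

    static inline void example_spin_lock(int *p)
    {
        while (__sync_lock_test_and_set(p, 1))   /* returns the old value */
            ;                                    /* spin until it was 0 */
    }

    static inline void example_spin_unlock(int *p)
    {
        __sync_lock_release(p);                  /* store 0 with release semantics */
    }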
| 
 | ||||
| typedef int spinlock_t; | ||||
| 
 | ||||
| #define SPIN_LOCK_UNLOCKED 0 | ||||
exec.c (2 lines changed)
							|  | @ -58,7 +58,7 @@ int nb_tbs; | |||
| /* any access to the tbs or the page table must use this lock */ | ||||
| spinlock_t tb_lock = SPIN_LOCK_UNLOCKED; | ||||
| 
 | ||||
| uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]; | ||||
| uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32))); | ||||
| uint8_t *code_gen_ptr; | ||||
| 
 | ||||
| int phys_ram_size; | ||||
|  | @ -0,0 +1,211 @@ | |||
| /* Default linker script, for normal executables */ | ||||
| OUTPUT_FORMAT("elf64-ia64-little", "elf64-ia64-little", | ||||
| 	      "elf64-ia64-little") | ||||
| OUTPUT_ARCH(ia64) | ||||
| ENTRY(_start) | ||||
| SEARCH_DIR("/usr/ia64-linux/lib"); SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib"); | ||||
| /* Do we need any of these for elf? | ||||
|    __DYNAMIC = 0;    */ | ||||
| SECTIONS | ||||
| { | ||||
|   /* Read-only sections, merged into text segment: */ | ||||
|   PROVIDE (__executable_start = 0x60000000); . = 0x60000000 + SIZEOF_HEADERS; | ||||
|   .interp         : { *(.interp) } | ||||
|   .hash           : { *(.hash) } | ||||
|   .dynsym         : { *(.dynsym) } | ||||
|   .dynstr         : { *(.dynstr) } | ||||
|   .gnu.version    : { *(.gnu.version) } | ||||
|   .gnu.version_d  : { *(.gnu.version_d) } | ||||
|   .gnu.version_r  : { *(.gnu.version_r) } | ||||
|   .rel.init       : { *(.rel.init) } | ||||
|   .rela.init      : { *(.rela.init) } | ||||
|   .rel.text       : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } | ||||
|   .rela.text      : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } | ||||
|   .rel.fini       : { *(.rel.fini) } | ||||
|   .rela.fini      : { *(.rela.fini) } | ||||
|   .rel.rodata     : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } | ||||
|   .rela.rodata    : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } | ||||
|   .rel.data       : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } | ||||
|   .rela.data      : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } | ||||
|   .rel.tdata	  : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } | ||||
|   .rela.tdata	  : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } | ||||
|   .rel.tbss	  : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } | ||||
|   .rela.tbss	  : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } | ||||
|   .rel.ctors      : { *(.rel.ctors) } | ||||
|   .rela.ctors     : { *(.rela.ctors) } | ||||
|   .rel.dtors      : { *(.rel.dtors) } | ||||
|   .rela.dtors     : { *(.rela.dtors) } | ||||
|   .rel.got        : { *(.rel.got) } | ||||
|   .rela.got       : { *(.rela.got) } | ||||
|   .rel.sdata      : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) } | ||||
|   .rela.sdata     : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) } | ||||
|   .rel.sbss       : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) } | ||||
|   .rela.sbss      : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) } | ||||
|   .rel.sdata2     : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) } | ||||
|   .rela.sdata2    : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) } | ||||
|   .rel.sbss2      : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) } | ||||
|   .rela.sbss2     : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) } | ||||
|   .rel.bss        : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } | ||||
|   .rela.bss       : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } | ||||
|   .rel.plt        : { *(.rel.plt) } | ||||
|   .rela.plt       : { *(.rela.plt) } | ||||
|   .rela.IA_64.pltoff   : { *(.rela.IA_64.pltoff) } | ||||
|   .init           : | ||||
|   { | ||||
|     KEEP (*(.init)) | ||||
|   } =0x00300000010070000002000001000400 | ||||
|   .plt            : { *(.plt) } | ||||
|   .text           : | ||||
|   { | ||||
|     *(.text .stub .text.* .gnu.linkonce.t.*) | ||||
|     /* .gnu.warning sections are handled specially by elf32.em.  */ | ||||
|     *(.gnu.warning) | ||||
|   } =0x00300000010070000002000001000400 | ||||
|   .fini           : | ||||
|   { | ||||
|     KEEP (*(.fini)) | ||||
|   } =0x00300000010070000002000001000400 | ||||
|   PROVIDE (__etext = .); | ||||
|   PROVIDE (_etext = .); | ||||
|   PROVIDE (etext = .); | ||||
|   .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) } | ||||
|   .rodata1        : { *(.rodata1) } | ||||
|   .sdata2         : { *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) } | ||||
|   .sbss2          : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) } | ||||
|   .opd            : { *(.opd) } | ||||
|   .IA_64.unwind_info   : { *(.IA_64.unwind_info* .gnu.linkonce.ia64unwi.*) } | ||||
|   .IA_64.unwind   : { *(.IA_64.unwind* .gnu.linkonce.ia64unw.*) } | ||||
|   .eh_frame_hdr : { *(.eh_frame_hdr) } | ||||
|   /* Adjust the address for the data segment.  We want to adjust up to | ||||
|      the same address within the page on the next page up.  */ | ||||
|   . = ALIGN(0x10000) + (. & (0x10000 - 1)); | ||||
|   /* Ensure the __preinit_array_start label is properly aligned.  We | ||||
|      could instead move the label definition inside the section, but | ||||
|      the linker would then create the section even if it turns out to | ||||
|      be empty, which isn't pretty.  */ | ||||
|   . = ALIGN(64 / 8); | ||||
|   PROVIDE (__preinit_array_start = .); | ||||
|   .preinit_array     : { *(.preinit_array) } | ||||
|   PROVIDE (__preinit_array_end = .); | ||||
|   PROVIDE (__init_array_start = .); | ||||
|   .init_array     : { *(.init_array) } | ||||
|   PROVIDE (__init_array_end = .); | ||||
|   PROVIDE (__fini_array_start = .); | ||||
|   .fini_array     : { *(.fini_array) } | ||||
|   PROVIDE (__fini_array_end = .); | ||||
|   .data           : | ||||
|   { | ||||
|     *(.data .data.* .gnu.linkonce.d.*) | ||||
|     SORT(CONSTRUCTORS) | ||||
|   } | ||||
|   .data1          : { *(.data1) } | ||||
|   .tdata	  : { *(.tdata .tdata.* .gnu.linkonce.td.*) } | ||||
|   .tbss		  : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } | ||||
|   .eh_frame       : { KEEP (*(.eh_frame)) } | ||||
|   .gcc_except_table   : { *(.gcc_except_table) } | ||||
|   .dynamic        : { *(.dynamic) } | ||||
|   .ctors          : | ||||
|   { | ||||
|     /* gcc uses crtbegin.o to find the start of | ||||
|        the constructors, so we make sure it is | ||||
|        first.  Because this is a wildcard, it | ||||
|        doesn't matter if the user does not | ||||
|        actually link against crtbegin.o; the | ||||
|        linker won't look for a file to match a | ||||
|        wildcard.  The wildcard also means that it | ||||
|        doesn't matter which directory crtbegin.o | ||||
|        is in.  */ | ||||
|     KEEP (*crtbegin*.o(.ctors)) | ||||
|     /* We don't want to include the .ctor section from | ||||
|        the crtend.o file until after the sorted ctors. | ||||
|        The .ctor section from the crtend file contains the | ||||
|        end of ctors marker and it must be last */ | ||||
|     KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors)) | ||||
|     KEEP (*(SORT(.ctors.*))) | ||||
|     KEEP (*(.ctors)) | ||||
|   } | ||||
|   .dtors          : | ||||
|   { | ||||
|     KEEP (*crtbegin*.o(.dtors)) | ||||
|     KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors)) | ||||
|     KEEP (*(SORT(.dtors.*))) | ||||
|     KEEP (*(.dtors)) | ||||
|   } | ||||
|   .jcr            : { KEEP (*(.jcr)) } | ||||
|   /* Ensure __gp is outside the range of any normal data.  We need to | ||||
|      do this to avoid the linker optimizing the code in op.o and getting | ||||
|      it out of sync with the relocs that we read when processing that | ||||
|      file.  A better solution might be to ensure that the dynamically | ||||
|      generated code and static qemu code share a single gp-value.  */ | ||||
|   __gp = . + 0x200000; | ||||
|   .got            : { *(.got.plt) *(.got) } | ||||
|   .IA_64.pltoff   : { *(.IA_64.pltoff) } | ||||
|   /* We want the small data sections together, so single-instruction offsets | ||||
|      can access them all, and initialized data all before uninitialized, so | ||||
|      we can shorten the on-disk segment size.  */ | ||||
|   .sdata          : | ||||
|   { | ||||
|     *(.sdata .sdata.* .gnu.linkonce.s.*) | ||||
|   } | ||||
|   _edata = .; | ||||
|   PROVIDE (edata = .); | ||||
|   __bss_start = .; | ||||
|   .sbss           : | ||||
|   { | ||||
|     PROVIDE (__sbss_start = .); | ||||
|     PROVIDE (___sbss_start = .); | ||||
|     *(.dynsbss) | ||||
|     *(.sbss .sbss.* .gnu.linkonce.sb.*) | ||||
|     *(.scommon) | ||||
|     PROVIDE (__sbss_end = .); | ||||
|     PROVIDE (___sbss_end = .); | ||||
|   } | ||||
|   .bss            : | ||||
|   { | ||||
|    . += 0x400000;	/* ensure .bss stuff is out of reach of gp */ | ||||
|    *(.dynbss) | ||||
|    *(.bss .bss.* .gnu.linkonce.b.*) | ||||
|    *(COMMON) | ||||
|    /* Align here to ensure that the .bss section occupies space up to | ||||
|       _end.  Align after .bss to ensure correct alignment even if the | ||||
|       .bss section disappears because there are no input sections.  */ | ||||
|    . = ALIGN(64 / 8); | ||||
|   } | ||||
|   . = ALIGN(64 / 8); | ||||
|   _end = .; | ||||
|   PROVIDE (end = .); | ||||
|   /* Stabs debugging sections.  */ | ||||
|   .stab          0 : { *(.stab) } | ||||
|   .stabstr       0 : { *(.stabstr) } | ||||
|   .stab.excl     0 : { *(.stab.excl) } | ||||
|   .stab.exclstr  0 : { *(.stab.exclstr) } | ||||
|   .stab.index    0 : { *(.stab.index) } | ||||
|   .stab.indexstr 0 : { *(.stab.indexstr) } | ||||
|   .comment       0 : { *(.comment) } | ||||
|   /* DWARF debug sections. | ||||
|      Symbols in the DWARF debugging sections are relative to the beginning | ||||
|      of the section so we begin them at 0.  */ | ||||
|   /* DWARF 1 */ | ||||
|   .debug          0 : { *(.debug) } | ||||
|   .line           0 : { *(.line) } | ||||
|   /* GNU DWARF 1 extensions */ | ||||
|   .debug_srcinfo  0 : { *(.debug_srcinfo) } | ||||
|   .debug_sfnames  0 : { *(.debug_sfnames) } | ||||
|   /* DWARF 1.1 and DWARF 2 */ | ||||
|   .debug_aranges  0 : { *(.debug_aranges) } | ||||
|   .debug_pubnames 0 : { *(.debug_pubnames) } | ||||
|   /* DWARF 2 */ | ||||
|   .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) } | ||||
|   .debug_abbrev   0 : { *(.debug_abbrev) } | ||||
|   .debug_line     0 : { *(.debug_line) } | ||||
|   .debug_frame    0 : { *(.debug_frame) } | ||||
|   .debug_str      0 : { *(.debug_str) } | ||||
|   .debug_loc      0 : { *(.debug_loc) } | ||||
|   .debug_macinfo  0 : { *(.debug_macinfo) } | ||||
|   /* SGI/MIPS DWARF 2 extensions */ | ||||
|   .debug_weaknames 0 : { *(.debug_weaknames) } | ||||
|   .debug_funcnames 0 : { *(.debug_funcnames) } | ||||
|   .debug_typenames 0 : { *(.debug_typenames) } | ||||
|   .debug_varnames  0 : { *(.debug_varnames) } | ||||
|   /DISCARD/ : { *(.note.GNU-stack) } | ||||
| } | ||||
|  | @ -152,7 +152,8 @@ long target_mmap(unsigned long start, unsigned long len, int prot, | |||
|                  int flags, int fd, unsigned long offset) | ||||
| { | ||||
|     unsigned long ret, end, host_start, host_end, retaddr, host_offset, host_len; | ||||
| #if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) | ||||
| #if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) || \ | ||||
|     defined(__ia64) | ||||
|     static unsigned long last_start = 0x40000000; | ||||
| #endif | ||||
| 
 | ||||
|  | @ -191,7 +192,8 @@ long target_mmap(unsigned long start, unsigned long len, int prot, | |||
|     host_start = start & qemu_host_page_mask; | ||||
| 
 | ||||
|     if (!(flags & MAP_FIXED)) { | ||||
| #if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) | ||||
| #if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) || \ | ||||
|     defined(__ia64) | ||||
|         /* tell the kernel to search at the same place as i386 */ | ||||
|         if (host_start == 0) { | ||||
|             host_start = last_start; | ||||
|  |  | |||
|  | @ -26,13 +26,6 @@ | |||
| #include <errno.h> | ||||
| #include <sys/ucontext.h> | ||||
| 
 | ||||
| #ifdef __ia64__ | ||||
| #undef uc_mcontext | ||||
| #undef uc_sigmask | ||||
| #undef uc_stack | ||||
| #undef uc_link | ||||
| #endif  | ||||
| 
 | ||||
| #include "qemu.h" | ||||
| 
 | ||||
| //#define DEBUG_SIGNAL
 | ||||
|  | @ -557,11 +550,11 @@ typedef struct target_sigaltstack { | |||
| } target_stack_t; | ||||
| 
 | ||||
| struct target_ucontext { | ||||
|         target_ulong	  uc_flags; | ||||
| 	target_ulong      uc_link; | ||||
| 	target_stack_t	  uc_stack; | ||||
| 	struct target_sigcontext uc_mcontext; | ||||
| 	target_sigset_t	  uc_sigmask;	/* mask last for extensibility */ | ||||
|         target_ulong	  tuc_flags; | ||||
| 	target_ulong      tuc_link; | ||||
| 	target_stack_t	  tuc_stack; | ||||
| 	struct target_sigcontext tuc_mcontext; | ||||
| 	target_sigset_t	  tuc_sigmask;	/* mask last for extensibility */ | ||||
| }; | ||||
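The uc_* to tuc_* rename (and the removal of the ia64 #undef workaround above) avoids a host/target name clash: on some hosts, ia64 included, <sys/ucontext.h> defines uc_mcontext, uc_sigmask and friends as macros, which would silently rewrite the target structure's field names. A hedged illustration of the pattern:

    #include <sys/ucontext.h>   /* may #define uc_mcontext, uc_sigmask, ... */

    /* hypothetical target-side struct: the tuc_ prefix keeps the fields out
       of the host macros' way, whereas plain uc_* names could be rewritten
       by the preprocessor */
    struct example_target_ucontext {
        unsigned long tuc_flags;
        unsigned long tuc_link;
        unsigned long tuc_mcontext_placeholder;   /* stands in for the real fields */
    };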
| 
 | ||||
| struct sigframe | ||||
|  | @ -743,16 +736,18 @@ static void setup_rt_frame(int sig, struct emulated_sigaction *ka, | |||
| 		goto give_sigsegv; | ||||
| 
 | ||||
| 	/* Create the ucontext.  */ | ||||
| 	err |= __put_user(0, &frame->uc.uc_flags); | ||||
| 	err |= __put_user(0, &frame->uc.uc_link); | ||||
| 	err |= __put_user(/*current->sas_ss_sp*/ 0, &frame->uc.uc_stack.ss_sp); | ||||
| 	err |= __put_user(0, &frame->uc.tuc_flags); | ||||
| 	err |= __put_user(0, &frame->uc.tuc_link); | ||||
| 	err |= __put_user(/*current->sas_ss_sp*/ 0, | ||||
| 			  &frame->uc.tuc_stack.ss_sp); | ||||
| 	err |= __put_user(/* sas_ss_flags(regs->esp) */ 0, | ||||
| 			  &frame->uc.uc_stack.ss_flags); | ||||
| 	err |= __put_user(/* current->sas_ss_size */ 0, &frame->uc.uc_stack.ss_size); | ||||
| 	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, | ||||
| 			  &frame->uc.tuc_stack.ss_flags); | ||||
| 	err |= __put_user(/* current->sas_ss_size */ 0, | ||||
| 			  &frame->uc.tuc_stack.ss_size); | ||||
| 	err |= setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, | ||||
| 			        env, set->sig[0]); | ||||
|         for(i = 0; i < TARGET_NSIG_WORDS; i++) { | ||||
|             if (__put_user(set->sig[i], &frame->uc.uc_sigmask.sig[i])) | ||||
|             if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i])) | ||||
|                 goto give_sigsegv; | ||||
|         } | ||||
| 
 | ||||
|  | @ -880,14 +875,14 @@ long do_rt_sigreturn(CPUX86State *env) | |||
| 	if (verify_area(VERIFY_READ, frame, sizeof(*frame))) | ||||
| 		goto badframe; | ||||
| #endif | ||||
|         target_to_host_sigset(&set, &frame->uc.uc_sigmask); | ||||
|         target_to_host_sigset(&set, &frame->uc.tuc_sigmask); | ||||
|         sigprocmask(SIG_SETMASK, &set, NULL); | ||||
| 	 | ||||
| 	if (restore_sigcontext(env, &frame->uc.uc_mcontext, &eax)) | ||||
| 	if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax)) | ||||
| 		goto badframe; | ||||
| 
 | ||||
| #if 0 | ||||
| 	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) | ||||
| 	if (__copy_from_user(&st, &frame->uc.tuc_stack, sizeof(st))) | ||||
| 		goto badframe; | ||||
| 	/* It is more difficult to avoid calling this function than to
 | ||||
| 	   call it and ignore errors.  */ | ||||
|  | @ -933,11 +928,11 @@ typedef struct target_sigaltstack { | |||
| } target_stack_t; | ||||
| 
 | ||||
| struct target_ucontext { | ||||
|     target_ulong uc_flags; | ||||
|     target_ulong uc_link; | ||||
|     target_stack_t uc_stack; | ||||
|     struct target_sigcontext uc_mcontext; | ||||
|     target_sigset_t  uc_sigmask;	/* mask last for extensibility */ | ||||
|     target_ulong tuc_flags; | ||||
|     target_ulong tuc_link; | ||||
|     target_stack_t tuc_stack; | ||||
|     struct target_sigcontext tuc_mcontext; | ||||
|     target_sigset_t  tuc_sigmask;	/* mask last for extensibility */ | ||||
| }; | ||||
| 
 | ||||
| struct sigframe | ||||
|  | @ -1135,10 +1130,10 @@ static void setup_rt_frame(int usig, struct emulated_sigaction *ka, | |||
| 	/* Clear all the bits of the ucontext we don't use.  */ | ||||
| 	err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); | ||||
| 
 | ||||
| 	err |= setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/ | ||||
| 	err |= setup_sigcontext(&frame->uc.tuc_mcontext, /*&frame->fpstate,*/ | ||||
| 				env, set->sig[0]); | ||||
|         for(i = 0; i < TARGET_NSIG_WORDS; i++) { | ||||
|             if (__put_user(set->sig[i], &frame->uc.uc_sigmask.sig[i])) | ||||
|             if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i])) | ||||
|                 return; | ||||
|         } | ||||
| 
 | ||||
|  | @ -1253,10 +1248,10 @@ long do_rt_sigreturn(CPUState *env) | |||
| 	if (verify_area(VERIFY_READ, frame, sizeof (*frame))) | ||||
| 		goto badframe; | ||||
| #endif | ||||
|         target_to_host_sigset(&host_set, &frame->uc.uc_sigmask); | ||||
|         target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); | ||||
|         sigprocmask(SIG_SETMASK, &host_set, NULL); | ||||
| 
 | ||||
| 	if (restore_sigcontext(env, &frame->uc.uc_mcontext)) | ||||
| 	if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) | ||||
| 		goto badframe; | ||||
| 
 | ||||
| #if 0 | ||||
|  |  | |||
|  | @ -165,7 +165,7 @@ static void get_human_readable_size(char *buf, int buf_size, int64_t size) | |||
|     int i; | ||||
| 
 | ||||
|     if (size <= 999) { | ||||
|         snprintf(buf, buf_size, "%lld", size); | ||||
|         snprintf(buf, buf_size, "%lld", (long long) size); | ||||
|     } else { | ||||
|         base = 1024; | ||||
|         for(i = 0; i < NB_SUFFIXES; i++) { | ||||
|  | @ -176,7 +176,7 @@ static void get_human_readable_size(char *buf, int buf_size, int64_t size) | |||
|                 break; | ||||
|             } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) { | ||||
|                 snprintf(buf, buf_size, "%lld%c",  | ||||
|                          (size + (base >> 1)) / base, | ||||
|                          (long long) ((size + (base >> 1)) / base), | ||||
|                          suffixes[i]); | ||||
|                 break; | ||||
|             } | ||||
|  | @ -369,7 +369,7 @@ static int img_create(int argc, char **argv) | |||
|         printf(", backing_file=%s", | ||||
|                base_filename); | ||||
|     } | ||||
|     printf(", size=%lld kB\n", size / 1024); | ||||
|     printf(", size=%lld kB\n", (long long) (size / 1024)); | ||||
|     ret = bdrv_create(drv, filename, size / 512, base_filename, encrypted); | ||||
|     if (ret < 0) { | ||||
|         if (ret == -ENOTSUP) { | ||||
|  | @ -666,7 +666,7 @@ static int img_info(int argc, char **argv) | |||
|            "virtual size: %s (%lld bytes)\n" | ||||
|            "disk size: %s\n", | ||||
|            filename, fmt_name, size_buf,  | ||||
|            total_sectors * 512, | ||||
|            (long long) (total_sectors * 512), | ||||
|            dsize_buf); | ||||
|     if (bdrv_is_encrypted(bs)) | ||||
|         printf("encrypted: yes\n"); | ||||
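The qemu-img changes above only add casts: on a 64-bit host such as ia64, int64_t is typically defined as long rather than long long, so passing it straight to a %lld conversion is undefined and draws printf-format warnings. A small sketch of the portable pattern (PRId64 shown as an alternative, not what the patch uses):

    #include <inttypes.h>
    #include <stdio.h>

    static void example_print_size(int64_t size)
    {
        printf("size=%lld bytes\n", (long long) size);   /* the cast used above */
        printf("size=%" PRId64 " bytes\n", size);        /* C99 alternative */
    }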