Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

- build bugfix from Fam and new configure check from Emilio
- two improvements to "info mtree" from Gerd
- KVM support for memory transaction attributes
- one more small step towards unlocked MMIO dispatch
- one piece of the qemu-nbd errno fixes
- trivial-ish patches from Denis and Thomas

# gpg: Signature made Fri May 8 13:47:29 2015 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  qemu-nbd: only send a limited number of errno codes on the wire
  rules.mak: Force CFLAGS for all objects in DSO
  configure: require __thread support
  exec: move rcu_read_lock/unlock to address_space_translate callers
  kvm: add support for memory transaction attributes
  mtree: also print disabled regions
  mtree: tag & indent a bit better
  apic_common: improve readability of apic_reset_common
  kvm: Silence warning from valgrind

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit b951cda21d
configure

@@ -1556,6 +1556,17 @@ if test "$static" = "yes" ; then
   fi
 fi
 
+# Unconditional check for compiler __thread support
+cat > $TMPC << EOF
+static __thread int tls_var;
+int main(void) { return tls_var; }
+EOF
+
+if ! compile_prog "-Werror" "" ; then
+    error_exit "Your compiler does not support the __thread specifier for " \
+        "Thread-Local Storage (TLS). Please upgrade to a version that does."
+fi
+
 if test "$pie" = ""; then
   case "$cpu-$targetos" in
     i386-Linux|x86_64-Linux|x32-Linux|i386-OpenBSD|x86_64-OpenBSD)
exec.c (33 changed lines)
@@ -373,6 +373,7 @@ static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
     return false;
 }
 
+/* Called from RCU critical section */
 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                       hwaddr *xlat, hwaddr *plen,
                                       bool is_write)
@@ -381,7 +382,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
     MemoryRegionSection *section;
     MemoryRegion *mr;
 
-    rcu_read_lock();
     for (;;) {
         AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
         section = address_space_translate_internal(d, addr, &addr, plen, true);
@@ -409,7 +409,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
     }
 
     *xlat = addr;
-    rcu_read_unlock();
     return mr;
 }
 
@@ -2329,6 +2328,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
     MemoryRegion *mr;
     MemTxResult result = MEMTX_OK;
 
+    rcu_read_lock();
     while (len > 0) {
         l = len;
         mr = address_space_translate(as, addr, &addr1, &l, is_write);
@@ -2415,6 +2415,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
         buf += l;
         addr += l;
     }
+    rcu_read_unlock();
 
     return result;
 }
@@ -2452,6 +2453,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
     hwaddr addr1;
     MemoryRegion *mr;
 
+    rcu_read_lock();
     while (len > 0) {
         l = len;
         mr = address_space_translate(as, addr, &addr1, &l, true);
@@ -2477,6 +2479,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
         buf += l;
         addr += l;
     }
+    rcu_read_unlock();
 }
 
 /* used for ROM loading : can write in RAM and ROM */
@@ -2585,6 +2588,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
     MemoryRegion *mr;
     hwaddr l, xlat;
 
+    rcu_read_lock();
     while (len > 0) {
         l = len;
         mr = address_space_translate(as, addr, &xlat, &l, is_write);
@@ -2598,6 +2602,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
         len -= l;
         addr += l;
     }
+    rcu_read_unlock();
     return true;
 }
 
@@ -2624,9 +2629,12 @@ void *address_space_map(AddressSpace *as,
     }
 
     l = len;
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &xlat, &l, is_write);
+
     if (!memory_access_is_direct(mr, is_write)) {
         if (atomic_xchg(&bounce.in_use, true)) {
+            rcu_read_unlock();
             return NULL;
         }
         /* Avoid unbounded allocations */
@@ -2642,6 +2650,7 @@ void *address_space_map(AddressSpace *as,
                                bounce.buffer, l);
         }
 
+        rcu_read_unlock();
         *plen = l;
         return bounce.buffer;
     }
@@ -2665,6 +2674,7 @@ void *address_space_map(AddressSpace *as,
     }
 
     memory_region_ref(mr);
+    rcu_read_unlock();
     *plen = done;
     return qemu_ram_ptr_length(raddr + base, plen);
 }
@@ -2728,6 +2738,7 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
     hwaddr addr1;
     MemTxResult r;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l, false);
     if (l < 4 || !memory_access_is_direct(mr, false)) {
         /* I/O case */
@@ -2762,6 +2773,7 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
     if (result) {
         *result = r;
     }
+    rcu_read_unlock();
     return val;
 }
 
@@ -2814,6 +2826,7 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
     hwaddr addr1;
     MemTxResult r;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l,
                                  false);
     if (l < 8 || !memory_access_is_direct(mr, false)) {
@@ -2849,6 +2862,7 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
     if (result) {
         *result = r;
     }
+    rcu_read_unlock();
     return val;
 }
 
@@ -2921,6 +2935,7 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
     hwaddr addr1;
     MemTxResult r;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l,
                                  false);
     if (l < 2 || !memory_access_is_direct(mr, false)) {
@@ -2956,6 +2971,7 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
     if (result) {
         *result = r;
     }
+    rcu_read_unlock();
     return val;
 }
 
@@ -3007,6 +3023,7 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
     hwaddr addr1;
     MemTxResult r;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l,
                                  true);
     if (l < 4 || !memory_access_is_direct(mr, true)) {
@@ -3029,6 +3046,7 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
     if (result) {
         *result = r;
     }
+    rcu_read_unlock();
 }
 
 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
@@ -3049,6 +3067,7 @@ static inline void address_space_stl_internal(AddressSpace *as,
     hwaddr addr1;
     MemTxResult r;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l,
                                  true);
     if (l < 4 || !memory_access_is_direct(mr, true)) {
@@ -3083,6 +3102,7 @@ static inline void address_space_stl_internal(AddressSpace *as,
     if (result) {
         *result = r;
     }
+    rcu_read_unlock();
 }
 
 void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
@@ -3152,6 +3172,7 @@ static inline void address_space_stw_internal(AddressSpace *as,
     hwaddr addr1;
     MemTxResult r;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr1, &l, true);
     if (l < 2 || !memory_access_is_direct(mr, true)) {
 #if defined(TARGET_WORDS_BIGENDIAN)
@@ -3185,6 +3206,7 @@ static inline void address_space_stw_internal(AddressSpace *as,
     if (result) {
         *result = r;
     }
+    rcu_read_unlock();
 }
 
 void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
@@ -3322,12 +3344,15 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr)
 {
     MemoryRegion*mr;
     hwaddr l = 1;
+    bool res;
 
+    rcu_read_lock();
     mr = address_space_translate(&address_space_memory,
                                  phys_addr, &phys_addr, &l, false);
 
-    return !(memory_region_is_ram(mr) ||
-             memory_region_is_romd(mr));
+    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
+    rcu_read_unlock();
+    return res;
 }
 
 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
hw/intc/apic_common.c

@@ -233,11 +233,10 @@ static void apic_reset_common(DeviceState *dev)
 {
     APICCommonState *s = APIC_COMMON(dev);
     APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
-    bool bsp;
+    uint32_t bsp;
 
-    bsp = cpu_is_bsp(s->cpu);
-    s->apicbase = APIC_DEFAULT_ADDRESS |
-        (bsp ? MSR_IA32_APICBASE_BSP : 0) | MSR_IA32_APICBASE_ENABLE;
+    bsp = s->apicbase & MSR_IA32_APICBASE_BSP;
+    s->apicbase = APIC_DEFAULT_ADDRESS | bsp | MSR_IA32_APICBASE_ENABLE;
 
     s->vapic_paddr = 0;
     info->vapic_base_update(s);
hw/vfio/common.c

@@ -270,13 +270,14 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
      * this IOMMU to its immediate target.  We need to translate
      * it the rest of the way through to memory.
      */
+    rcu_read_lock();
     mr = address_space_translate(&address_space_memory,
                                  iotlb->translated_addr,
                                  &xlat, &len, iotlb->perm & IOMMU_WO);
     if (!memory_region_is_ram(mr)) {
         error_report("iommu map to non memory area %"HWADDR_PRIx"",
                      xlat);
-        return;
+        goto out;
     }
     /*
      * Translation truncates length to the IOMMU page size,
@@ -284,7 +285,7 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
      */
     if (len & iotlb->addr_mask) {
         error_report("iommu has granularity incompatible with target AS");
-        return;
+        goto out;
     }
 
     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
@@ -307,6 +308,8 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
                          iotlb->addr_mask + 1, ret);
         }
     }
+out:
+    rcu_read_unlock();
 }
 
 static void vfio_listener_region_add(MemoryListener *listener,
include/exec/memory.h

@@ -1233,7 +1233,9 @@ void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
 #endif
 
 /* address_space_translate: translate an address range into an address space
- * into a MemoryRegion and an address range into that section
+ * into a MemoryRegion and an address range into that section.  Should be
+ * called from an RCU critical section, to avoid that the last reference
+ * to the returned region disappears after address_space_translate returns.
  *
  * @as: #AddressSpace to be accessed
  * @addr: address within that address space
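For illustration only (this example is not part of the pull request): the calling convention the updated comment describes, written out with the functions already used in this series. Every use of the returned MemoryRegion has to stay inside the same RCU critical section; the exec.c and hw/vfio/common.c hunks above follow exactly this shape.

    /* Illustrative sketch, not in the patch: caller-side RCU around translation. */
    static void lookup_example(AddressSpace *as, hwaddr addr)
    {
        hwaddr xlat, len = 1;
        MemoryRegion *mr;

        rcu_read_lock();
        mr = address_space_translate(as, addr, &xlat, &len, false);
        /* ... use mr and xlat here, e.g. memory_region_is_ram(mr) ... */
        rcu_read_unlock();
        /* mr must not be dereferenced past this point */
    }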
include/sysemu/kvm.h

@@ -18,6 +18,7 @@
 #include "config-host.h"
 #include "qemu/queue.h"
 #include "qom/cpu.h"
+#include "exec/memattrs.h"
 
 #ifdef CONFIG_KVM
 #include <linux/kvm.h>
@@ -254,7 +255,7 @@ int kvm_create_device(KVMState *s, uint64_t type, bool test);
 extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
 
 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
-void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
+MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
 
 int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
 
kvm-all.c (21 changed lines)
@@ -1669,14 +1669,14 @@ void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
     s->sigmask_len = sigmask_len;
 }
 
-static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
-                          uint32_t count)
+static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
+                          int size, uint32_t count)
 {
     int i;
     uint8_t *ptr = data;
 
     for (i = 0; i < count; i++) {
-        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
+        address_space_rw(&address_space_io, port, attrs,
                          ptr, size,
                          direction == KVM_EXIT_IO_OUT);
         ptr += size;
@@ -1796,6 +1796,8 @@ int kvm_cpu_exec(CPUState *cpu)
     }
 
     do {
+        MemTxAttrs attrs;
+
         if (cpu->kvm_vcpu_dirty) {
             kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
             cpu->kvm_vcpu_dirty = false;
@@ -1816,7 +1818,7 @@ int kvm_cpu_exec(CPUState *cpu)
         run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
 
         qemu_mutex_lock_iothread();
-        kvm_arch_post_run(cpu, run);
+        attrs = kvm_arch_post_run(cpu, run);
 
         if (run_ret < 0) {
             if (run_ret == -EINTR || run_ret == -EAGAIN) {
@@ -1834,7 +1836,7 @@ int kvm_cpu_exec(CPUState *cpu)
         switch (run->exit_reason) {
         case KVM_EXIT_IO:
             DPRINTF("handle_io\n");
-            kvm_handle_io(run->io.port,
+            kvm_handle_io(run->io.port, attrs,
                           (uint8_t *)run + run->io.data_offset,
                           run->io.direction,
                           run->io.size,
@@ -1843,10 +1845,11 @@ int kvm_cpu_exec(CPUState *cpu)
             break;
         case KVM_EXIT_MMIO:
             DPRINTF("handle_mmio\n");
-            cpu_physical_memory_rw(run->mmio.phys_addr,
-                                   run->mmio.data,
-                                   run->mmio.len,
-                                   run->mmio.is_write);
+            address_space_rw(&address_space_memory,
+                             run->mmio.phys_addr, attrs,
+                             run->mmio.data,
+                             run->mmio.len,
+                             run->mmio.is_write);
             ret = 0;
             break;
         case KVM_EXIT_IRQ_WINDOW_OPEN:
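For illustration only (not part of this pull request): with the plumbing above, an architecture backend that wants to attach attributes to guest PIO/MMIO accesses returns them from its kvm_arch_post_run() instead of MEMTXATTRS_UNSPECIFIED, and kvm_cpu_exec() forwards them unchanged to kvm_handle_io() and address_space_rw(). A minimal hypothetical sketch; the helper vcpu_exited_from_secure_world() and the use of a .secure attribute field are assumptions made for the example, not code in this series:

    /* Hypothetical backend, illustration only; not in the patch. */
    MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
    {
        if (vcpu_exited_from_secure_world(cs)) {   /* assumed helper */
            return (MemTxAttrs) { .secure = 1 };   /* assumed attribute */
        }
        return MEMTXATTRS_UNSPECIFIED;
    }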
memory.c (23 changed lines)
@@ -2089,7 +2089,7 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
     const MemoryRegion *submr;
     unsigned int i;
 
-    if (!mr || !mr->enabled) {
+    if (!mr) {
         return;
     }
 
@@ -2115,7 +2115,7 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
         }
         mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
-                   "-" TARGET_FMT_plx "\n",
+                   "-" TARGET_FMT_plx "%s\n",
                    base + mr->addr,
                    base + mr->addr
                    + (int128_nz(mr->size) ?
@@ -2131,10 +2131,11 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
                    mr->alias_offset
                    + (int128_nz(mr->size) ?
                       (hwaddr)int128_get64(int128_sub(mr->size,
-                                                      int128_one())) : 0));
+                                                      int128_one())) : 0),
+                   mr->enabled ? "" : " [disabled]");
     } else {
         mon_printf(f,
-                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
+                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
                    base + mr->addr,
                    base + mr->addr
                    + (int128_nz(mr->size) ?
@@ -2144,7 +2145,8 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
                    mr->romd_mode ? 'R' : '-',
                    !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                        : '-',
-                   memory_region_name(mr));
+                   memory_region_name(mr),
+                   mr->enabled ? "" : " [disabled]");
     }
 
     QTAILQ_INIT(&submr_print_queue);
@@ -2185,15 +2187,16 @@ void mtree_info(fprintf_function mon_printf, void *f)
     QTAILQ_INIT(&ml_head);
 
     QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
-        mon_printf(f, "%s\n", as->name);
-        mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head);
+        mon_printf(f, "address-space: %s\n", as->name);
+        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
+        mon_printf(f, "\n");
     }
 
-    mon_printf(f, "aliases\n");
     /* print aliased regions */
     QTAILQ_FOREACH(ml, &ml_head, queue) {
-        mon_printf(f, "%s\n", memory_region_name(ml->mr));
-        mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
+        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
+        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
+        mon_printf(f, "\n");
     }
 
     QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
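Illustrative, abbreviated "info mtree" output with the two mtree changes above applied. The addresses and region names are invented for the example; the "address-space:" and "memory-region:" tags, the extra level of indentation, and the " [disabled]" suffix come from the format strings in the diff:

    address-space: memory
      0000000000000000-ffffffffffffffff (prio 0, RW): system
        00000000000a0000-00000000000bffff (prio 1, RW): alias vga-window @pci 00000000000a0000-00000000000bffff
        00000000fee00000-00000000fee00fff (prio 0, RW): example-region [disabled]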
nbd.c (57 changed lines)
@@ -86,6 +86,59 @@
 #define NBD_OPT_ABORT (2)
 #define NBD_OPT_LIST (3)
 
+/* NBD errors are based on errno numbers, so there is a 1:1 mapping,
+ * but only a limited set of errno values is specified in the protocol.
+ * Everything else is squashed to EINVAL.
+ */
+#define NBD_SUCCESS    0
+#define NBD_EPERM      1
+#define NBD_EIO        5
+#define NBD_ENOMEM     12
+#define NBD_EINVAL     22
+#define NBD_ENOSPC     28
+
+static int system_errno_to_nbd_errno(int err)
+{
+    switch (err) {
+    case 0:
+        return NBD_SUCCESS;
+    case EPERM:
+        return NBD_EPERM;
+    case EIO:
+        return NBD_EIO;
+    case ENOMEM:
+        return NBD_ENOMEM;
+#ifdef EDQUOT
+    case EDQUOT:
+#endif
+    case EFBIG:
+    case ENOSPC:
+        return NBD_ENOSPC;
+    case EINVAL:
+    default:
+        return NBD_EINVAL;
+    }
+}
+
+static int nbd_errno_to_system_errno(int err)
+{
+    switch (err) {
+    case NBD_SUCCESS:
+        return 0;
+    case NBD_EPERM:
+        return EPERM;
+    case NBD_EIO:
+        return EIO;
+    case NBD_ENOMEM:
+        return ENOMEM;
+    case NBD_ENOSPC:
+        return ENOSPC;
+    case NBD_EINVAL:
+    default:
+        return EINVAL;
+    }
+}
+
 /* Definitions for opaque data types */
 
 typedef struct NBDRequest NBDRequest;
@@ -856,6 +909,8 @@ ssize_t nbd_receive_reply(int csock, struct nbd_reply *reply)
     reply->error = be32_to_cpup((uint32_t*)(buf + 4));
     reply->handle = be64_to_cpup((uint64_t*)(buf + 8));
 
+    reply->error = nbd_errno_to_system_errno(reply->error);
+
     TRACE("Got reply: "
           "{ magic = 0x%x, .error = %d, handle = %" PRIu64" }",
           magic, reply->error, reply->handle);
@@ -872,6 +927,8 @@ static ssize_t nbd_send_reply(int csock, struct nbd_reply *reply)
     uint8_t buf[NBD_REPLY_SIZE];
     ssize_t ret;
 
+    reply->error = system_errno_to_nbd_errno(reply->error);
+
     /* Reply
        [ 0 .. 3] magic (NBD_REPLY_MAGIC)
        [ 4 .. 7] error (0 == no error)
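Worked example, derived only from the two switch statements above: if the server's request handler fails with EACCES, system_errno_to_nbd_errno() has no case for it, so the reply carries NBD_EINVAL (22) on the wire; the client side of nbd_receive_reply() maps it back with nbd_errno_to_system_errno() and the caller sees EINVAL. Only 0, EPERM, EIO, ENOMEM and ENOSPC survive the round trip unchanged, with EDQUOT and EFBIG squashed to ENOSPC.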
rules.mak

@@ -102,7 +102,8 @@ endif
 %.o: %.dtrace
 	$(call quiet-command,dtrace -o $@ -G -s $<, "  GEN   $(TARGET_DIR)$@")
 
-%$(DSOSUF): CFLAGS += -fPIC -DBUILD_DSO
+DSO_OBJ_CFLAGS := -fPIC -DBUILD_DSO
+module-common.o: CFLAGS += $(DSO_OBJ_CFLAGS)
 %$(DSOSUF): LDFLAGS += $(LDFLAGS_SHARED)
 %$(DSOSUF): %.mo
 	$(call LINK,$^)
@@ -351,6 +352,7 @@ define unnest-vars
         # For non-module build, add -m to -y
         $(if $(CONFIG_MODULES),
             $(foreach o,$($v),
+                $(eval $($o-objs): CFLAGS += $(DSO_OBJ_CFLAGS))
                 $(eval $o: $($o-objs)))
             $(eval $(patsubst %-m,%-y,$v) += $($v))
             $(eval modules: $($v:%.mo=%$(DSOSUF))),
target-arm/kvm.c

@@ -23,6 +23,7 @@
 #include "cpu.h"
 #include "internals.h"
 #include "hw/arm/arm.h"
+#include "exec/memattrs.h"
 
 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
     KVM_CAP_LAST_INFO
@@ -506,8 +507,9 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
 {
 }
 
-void kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
 {
+    return MEMTXATTRS_UNSPECIFIED;
 }
 
 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
target-i386/kvm.c

@@ -37,6 +37,7 @@
 #include "hw/pci/pci.h"
 #include "migration/migration.h"
 #include "qapi/qmp/qerror.h"
+#include "exec/memattrs.h"
 
 //#define DEBUG_KVM
 
@@ -2246,7 +2247,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     }
 }
 
-void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
 {
     X86CPU *x86_cpu = X86_CPU(cpu);
     CPUX86State *env = &x86_cpu->env;
@@ -2258,6 +2259,7 @@ void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
     }
     cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
     cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
+    return MEMTXATTRS_UNSPECIFIED;
 }
 
 int kvm_arch_process_async_events(CPUState *cs)
target-mips/kvm.c

@@ -23,6 +23,7 @@
 #include "cpu.h"
 #include "sysemu/cpus.h"
 #include "kvm_mips.h"
+#include "exec/memattrs.h"
 
 #define DEBUG_KVM 0
 
@@ -110,9 +111,10 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
     }
 }
 
-void kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
 {
     DPRINTF("%s\n", __func__);
+    return MEMTXATTRS_UNSPECIFIED;
 }
 
 int kvm_arch_process_async_events(CPUState *cs)
target-ppc/kvm.c

@@ -39,6 +39,7 @@
 #include "sysemu/watchdog.h"
 #include "trace.h"
 #include "exec/gdbstub.h"
+#include "exec/memattrs.h"
 
 //#define DEBUG_KVM
 
@@ -1270,8 +1271,9 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
      * anyways, so we will get a chance to deliver the rest. */
 }
 
-void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
 {
+    return MEMTXATTRS_UNSPECIFIED;
 }
 
 int kvm_arch_process_async_events(CPUState *cs)
target-s390x/kvm.c

@@ -45,6 +45,7 @@
 #include "hw/s390x/s390-pci-bus.h"
 #include "hw/s390x/ipl.h"
 #include "hw/s390x/ebcdic.h"
+#include "exec/memattrs.h"
 
 /* #define DEBUG_KVM */
 
@@ -780,8 +781,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 {
 }
 
-void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
 {
+    return MEMTXATTRS_UNSPECIFIED;
 }
 
 int kvm_arch_process_async_events(CPUState *cs)
translate-all.c

@@ -1416,14 +1416,17 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
     MemoryRegion *mr;
     hwaddr l = 1;
 
+    rcu_read_lock();
     mr = address_space_translate(as, addr, &addr, &l, false);
     if (!(memory_region_is_ram(mr)
           || memory_region_is_romd(mr))) {
+        rcu_read_unlock();
         return;
     }
     ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
         + addr;
     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+    rcu_read_unlock();
 }
 #endif /* !defined(CONFIG_USER_ONLY) */
 