xen: introduce an event channel for buffered io event notifications
Use the newly introduced HVM_PARAM_BUFIOREQ_EVTCHN to receive
notifications for buffered io events. After the first notification is
received, leave the event channel masked and set up a timer to process
the rest of the batch. Once we have finished processing the batch,
unmask the event channel and delete the timer.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
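As a rough orientation before the diff, here is a condensed sketch of the resulting flow, pieced together from the hunks below. It is a simplification, not the literal xen-all.c code: error handling, the per-vcpu ioreq path and unrelated fields are omitted, and BUFFER_IO_MAX_DELAY is the 100 ms delay the patch defines.

    /* Sketch only -- see the actual hunks below for the real code. */

    /* First buffered-io notification: the event channel is left masked;
     * just (re)arm the timer and let its callback drain the batch. */
    static ioreq_t *cpu_get_ioreq(XenIOState *state)
    {
        evtchn_port_t port = xc_evtchn_pending(state->xce_handle);

        if (port == state->bufioreq_local_port) {
            qemu_mod_timer(state->buffered_io_timer,
                           BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
            return NULL;    /* nothing synchronous to serve */
        }
        /* ... per-vcpu ioreq ports handled as before ... */
    }

    /* Timer callback: keep rescheduling while the buffered-io ring still
     * has requests; once it is empty, delete the timer and unmask the
     * event channel so the next batch raises a fresh notification. */
    static void handle_buffered_io(void *opaque)
    {
        XenIOState *state = opaque;

        if (handle_buffered_iopage(state)) {   /* non-zero: work was drained */
            qemu_mod_timer(state->buffered_io_timer,
                           BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        } else {
            qemu_del_timer(state->buffered_io_timer);
            xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
        }
    }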
parent 852a7cec90
commit fda1f76849

xen-all.c | 45
@@ -59,6 +59,9 @@ static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
 }
 #  define FMT_ioreq_size "u"
 #endif
+#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
+#define HVM_PARAM_BUFIOREQ_EVTCHN 26
+#endif
 
 #define BUFFER_IO_MAX_DELAY  100
 
@@ -77,6 +80,8 @@ typedef struct XenIOState {
     QEMUTimer *buffered_io_timer;
     /* the evtchn port for polling the notification, */
     evtchn_port_t *ioreq_local_port;
+    /* evtchn local port for buffered io */
+    evtchn_port_t bufioreq_local_port;
     /* the evtchn fd for polling */
     XenEvtchn xce_handle;
     /* which vcpu we are serving */
@@ -629,6 +634,12 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
     evtchn_port_t port;
 
     port = xc_evtchn_pending(state->xce_handle);
+    if (port == state->bufioreq_local_port) {
+        qemu_mod_timer(state->buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+        return NULL;
+    }
+
     if (port != -1) {
         for (i = 0; i < smp_cpus; i++) {
             if (state->ioreq_local_port[i] == port) {
@@ -777,16 +788,18 @@ static void handle_ioreq(ioreq_t *req)
     }
 }
 
-static void handle_buffered_iopage(XenIOState *state)
+static int handle_buffered_iopage(XenIOState *state)
 {
     buf_ioreq_t *buf_req = NULL;
     ioreq_t req;
     int qw;
 
     if (!state->buffered_io_page) {
-        return;
+        return 0;
     }
 
+    memset(&req, 0x00, sizeof(req));
+
     while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
         buf_req = &state->buffered_io_page->buf_ioreq[
             state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
@@ -811,15 +824,21 @@ static void handle_buffered_iopage(XenIOState *state)
         xen_mb();
         state->buffered_io_page->read_pointer += qw ? 2 : 1;
     }
+
+    return req.count;
 }
 
 static void handle_buffered_io(void *opaque)
 {
     XenIOState *state = opaque;
 
-    handle_buffered_iopage(state);
-    qemu_mod_timer(state->buffered_io_timer,
-                   BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+    if (handle_buffered_iopage(state)) {
+        qemu_mod_timer(state->buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+    } else {
+        qemu_del_timer(state->buffered_io_timer);
+        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+    }
 }
 
 static void cpu_handle_ioreq(void *opaque)
@@ -949,7 +968,6 @@ static void xen_main_loop_prepare(XenIOState *state)
 
     state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                  state);
-    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));
 
     if (evtchn_fd != -1) {
         qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
@@ -1050,6 +1068,7 @@ int xen_hvm_init(void)
 {
     int i, rc;
     unsigned long ioreq_pfn;
+    unsigned long bufioreq_evtchn;
     XenIOState *state;
 
     state = g_malloc0(sizeof (XenIOState));
@@ -1102,6 +1121,20 @@ int xen_hvm_init(void)
         state->ioreq_local_port[i] = rc;
     }
 
+    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
+            &bufioreq_evtchn);
+    if (rc < 0) {
+        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
+        return -1;
+    }
+    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+            (uint32_t)bufioreq_evtchn);
+    if (rc == -1) {
+        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
+        return -1;
+    }
+    state->bufioreq_local_port = rc;
+
     /* Init RAM management */
     xen_map_cache_init(xen_phys_offset_to_gaddr, state);
     xen_ram_init(ram_size);