aio: add a ThreadPool instance to AioContext
This patch adds a ThreadPool to AioContext.  It's possible that some
AioContext instances will never use the ThreadPool, so defer creation
until aio_get_thread_pool().

The reason why AioContext should have the ThreadPool is that the
ThreadPool is bound to an AioContext instance, where the work item's
callback function is invoked.  It doesn't make sense to keep the
ThreadPool pointer anywhere other than AioContext.  For example,
block/raw-posix.c can get its AioContext's ThreadPool and submit work.

Special note about headers: I used struct ThreadPool in aio.h because
there is a circular dependency if aio.h includes thread-pool.h.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
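For illustration, here is a minimal sketch of how a driver such as
block/raw-posix.c could route blocking work through its AioContext's
pool.  The my_worker_fn()/my_complete_cb() helpers are hypothetical,
and a thread_pool_submit_aio() that takes an explicit ThreadPool
argument is assumed (the submit functions only grow that parameter
later in this series):

    #include "block/aio.h"
    #include "block/thread-pool.h"

    /* Hypothetical work function: runs in a worker thread and returns
     * its result, standing in for a blocking syscall. */
    static int my_worker_fn(void *opaque)
    {
        int *value = opaque;
        return *value + 1;
    }

    /* Hypothetical completion callback: invoked in the thread that
     * runs the pool's AioContext, with the work function's result. */
    static void my_complete_cb(void *opaque, int ret)
    {
    }

    static void submit_example(AioContext *ctx, int *value)
    {
        /* The pool is created on first use, so contexts that never
         * submit work never pay for one. */
        ThreadPool *pool = aio_get_thread_pool(ctx);

        thread_pool_submit_aio(pool, my_worker_fn, value,
                               my_complete_cb, NULL);
    }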
commit 9b34277d23
parent f7311ccc63

 async.c             | 11 +++++++++++
 include/block/aio.h |  6 ++++++
 2 files changed, 17 insertions(+)
diff --git a/async.c b/async.c
--- a/async.c
+++ b/async.c
@@ -24,6 +24,7 @@
 
 #include "qemu-common.h"
 #include "block/aio.h"
+#include "block/thread-pool.h"
 #include "qemu/main-loop.h"
 
 /***********************************************************/
@@ -172,6 +173,7 @@ aio_ctx_finalize(GSource     *source)
 {
     AioContext *ctx = (AioContext *) source;
 
+    thread_pool_free(ctx->thread_pool);
     aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
     g_array_free(ctx->pollfds, TRUE);
@@ -190,6 +192,14 @@ GSource *aio_get_g_source(AioContext *ctx)
     return &ctx->source;
 }
 
+ThreadPool *aio_get_thread_pool(AioContext *ctx)
+{
+    if (!ctx->thread_pool) {
+        ctx->thread_pool = thread_pool_new(ctx);
+    }
+    return ctx->thread_pool;
+}
+
 void aio_notify(AioContext *ctx)
 {
     event_notifier_set(&ctx->notifier);
@@ -200,6 +210,7 @@ AioContext *aio_context_new(void)
     AioContext *ctx;
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
     ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
+    ctx->thread_pool = NULL;
     event_notifier_init(&ctx->notifier, false);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)

diff --git a/include/block/aio.h b/include/block/aio.h
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -66,6 +66,9 @@ typedef struct AioContext {
 
     /* GPollFDs for aio_poll() */
     GArray *pollfds;
+
+    /* Thread pool for performing work and receiving completion callbacks */
+    struct ThreadPool *thread_pool;
 } AioContext;
 
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
@@ -223,6 +226,9 @@ void aio_set_event_notifier(AioContext *ctx,
  */
 GSource *aio_get_g_source(AioContext *ctx);
 
+/* Return the ThreadPool bound to this AioContext */
+struct ThreadPool *aio_get_thread_pool(AioContext *ctx);
+
 /* Functions to operate on the main QEMU AioContext.  */
 
 bool qemu_aio_wait(void);