/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "qemu-aio.h"

/*
 * An AsyncContext protects the callbacks of AIO requests and Bottom Halves
 * against interfering with each other. A typical example is qcow2, which
 * accepts asynchronous requests but relies on synchronous bdrv_read/write,
 * which must not trigger any callbacks, for manipulating its metadata.
 *
 * However, these synchronous functions are often emulated using AIO, which
 * means that AIO callbacks must be run - but at the same time we must not run
 * callbacks of other requests, as they might start to modify metadata and
 * corrupt the internal state of the caller of bdrv_read/write.
 *
 * To achieve the desired semantics we switch into a new AsyncContext.
 * Callbacks must only be run if they belong to the current AsyncContext.
 * Otherwise they need to be queued until their own context is active again.
 * This is how you can make qemu_aio_wait() wait only for your own callbacks.
 *
 * The AsyncContexts form a stack. When you leave an AsyncContext, you always
 * return to the old ("parent") context.
 */
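/*
 * A minimal usage sketch (hypothetical caller; bdrv_read is the real block
 * layer function, everything else here is illustrative):
 *
 *     async_context_push();             // enter a fresh context; callbacks
 *                                       // of older requests are now queued
 *     ret = bdrv_read(bs, sn, buf, n);  // may be emulated via AIO internally;
 *                                       // qemu_aio_wait() only completes
 *                                       // requests of the current context
 *     async_context_pop();              // flush our Bottom Halves, then let
 *                                       // queued foreign completions run
 */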
struct AsyncContext {
    /* Consecutive number of the AsyncContext (position in the stack) */
    int id;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* Link to parent context */
    struct AsyncContext *parent;
};

/* The currently active AsyncContext */
static struct AsyncContext *async_context = &(struct AsyncContext) { 0 };

/*
 * Enter a new AsyncContext. Already scheduled Bottom Halves and AIO callbacks
 * won't be called until this context is left again.
 */
void async_context_push(void)
{
    struct AsyncContext *new = qemu_mallocz(sizeof(*new));
    new->parent = async_context;
    new->id = async_context->id + 1;
    async_context = new;
}

     72 
     73 /* Run queued AIO completions and destroy Bottom Half */
     74 static void bh_run_aio_completions(void *opaque)
     75 {
     76     QEMUBH **bh = opaque;
     77     qemu_bh_delete(*bh);
     78     qemu_free(bh);
     79     qemu_aio_process_queue();
     80 }
/*
 * Leave the currently active AsyncContext. All Bottom Halves belonging to the
 * old context are executed before changing the context.
 */
void async_context_pop(void)
{
    struct AsyncContext *old = async_context;
    QEMUBH **bh;

    /* Flush the bottom halves, we don't want to lose them */
    while (qemu_bh_poll());

    /* Switch back to the parent context */
    async_context = async_context->parent;
    qemu_free(old);

    if (async_context == NULL) {
        abort();
    }

    /* Schedule BH to run any queued AIO completions as soon as possible */
    bh = qemu_malloc(sizeof(*bh));
    *bh = qemu_bh_new(bh_run_aio_completions, bh);
    qemu_bh_schedule(*bh);
}

/*
 * Returns the ID of the currently active AsyncContext
 */
int get_async_context_id(void)
{
    return async_context->id;
}

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    QEMUBHFunc *cb;     /* callback to invoke when the BH runs */
    void *opaque;       /* opaque value passed to the callback */
    int scheduled;      /* set by qemu_bh_schedule*, cleared before cb runs */
    int idle;           /* idle BHs do not wake the main loop; see below */
    int deleted;        /* marked by qemu_bh_delete, freed in qemu_bh_poll */
    QEMUBH *next;       /* next BH in the context's list */
};

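/*
 * Create a new Bottom Half and link it into the list of the currently
 * active AsyncContext. The callback is not invoked until the BH is
 * scheduled with qemu_bh_schedule() or qemu_bh_schedule_idle().
 */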
QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = qemu_mallocz(sizeof(QEMUBH));
    bh->cb = cb;
    bh->opaque = opaque;
    bh->next = async_context->first_bh;
    async_context->first_bh = bh;
    return bh;
}

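/*
 * Run all scheduled Bottom Halves of the current AsyncContext, then free
 * those that have been deleted. Returns 1 if a non-idle BH was executed,
 * 0 otherwise (callers use this to decide whether to keep polling).
 */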
int qemu_bh_poll(void)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ret = 0;
    for (bh = async_context->first_bh; bh; bh = next) {
        /* Fetch the next pointer before running the callback: the callback
         * may delete bh, and a nested qemu_bh_poll() would then free it. */
        next = bh->next;
        if (!bh->deleted && bh->scheduled) {
            bh->scheduled = 0;
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

    /* remove deleted bhs */
    bhp = &async_context->first_bh;
    while (*bhp) {
        bh = *bhp;
        if (bh->deleted) {
            *bhp = bh->next;
            qemu_free(bh);
        } else {
            bhp = &bh->next;
        }
    }

    return ret;
}

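/*
 * Schedule a Bottom Half as "idle": it does not wake up the main loop and
 * only runs the next time the main loop polls, at the latest after 10ms
 * (see qemu_bh_update_timeout below).
 */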
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    if (bh->scheduled)
        return;
    bh->scheduled = 1;
    bh->idle = 1;
}

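/*
 * Schedule a Bottom Half for execution as soon as possible, notifying the
 * event loop so that a blocked main loop wakes up and runs it.
 */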
void qemu_bh_schedule(QEMUBH *bh)
{
    if (bh->scheduled)
        return;
    bh->scheduled = 1;
    bh->idle = 0;
    /* stop the currently executing CPU to execute the BH ASAP */
    qemu_notify_event();
}

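/*
 * Cancel a pending execution of the Bottom Half. The BH itself stays valid
 * and may be scheduled again later.
 */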
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

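/*
 * Cancel the Bottom Half and mark it for deletion. The memory is not
 * released immediately but during the next qemu_bh_poll() run.
 */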
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

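/*
 * Clamp the main loop timeout so that scheduled Bottom Halves run promptly:
 * 0 if a regular BH is pending, at most 10ms if only idle BHs are pending.
 */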
void qemu_bh_update_timeout(int *timeout)
{
    QEMUBH *bh;

    for (bh = async_context->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                *timeout = MIN(10, *timeout);
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                *timeout = 0;
                break;
            }
        }
    }
}