accel/tcg: Create queue_tb_flush from tb_flush
Rename the function and remove the path which performs the flush immediately.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
430014bee7
commit
a9519a4615
3 changed files with 10 additions and 20 deletions
|
|
@@ -797,17 +797,12 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
     }
 }
 
-void tb_flush(CPUState *cpu)
+void queue_tb_flush(CPUState *cs)
 {
     if (tcg_enabled()) {
         unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);
-
-        if (cpu_in_serial_context(cpu)) {
-            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
-        } else {
-            async_safe_run_on_cpu(cpu, do_tb_flush,
-                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
-        }
+        async_safe_run_on_cpu(cs, do_tb_flush,
+                              RUN_ON_CPU_HOST_INT(tb_flush_count));
     }
 }
|
|
|||
|
|
@@ -289,7 +289,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s)
     tb = tcg_tb_alloc(tcg_ctx);
     if (unlikely(!tb)) {
         /* flush must be done */
-        tb_flush(cpu);
+        queue_tb_flush(cpu);
         mmap_unlock();
         /* Make the execution loop process the flush as soon as possible. */
         cpu->exception_index = EXCP_INTERRUPT;
|
|
|||
|
|
@@ -24,19 +24,14 @@
 void tb_flush__exclusive_or_serial(void);
 
 /**
- * tb_flush() - flush all translation blocks
- * @cs: CPUState (must be valid, but treated as anonymous pointer)
+ * queue_tb_flush() - add flush to the cpu work queue
+ * @cs: CPUState
  *
- * Used to flush all the translation blocks in the system. Sometimes
- * it is simpler to flush everything than work out which individual
- * translations are now invalid and ensure they are not called
- * anymore.
- *
- * tb_flush() takes care of running the flush in an exclusive context
- * if it is not already running in one. This means no guest code will
- * run until this complete.
+ * Flush all translation blocks the next time @cs processes the work queue.
+ * This should generally be followed by cpu_loop_exit(), so that the work
+ * queue is processed promptly.
  */
-void tb_flush(CPUState *cs);
+void queue_tb_flush(CPUState *cs);
 
 void tcg_flush_jmp_cache(CPUState *cs);
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue