/* qemu/cpu-common.c */
/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "sysemu/cpus.h"
#include "qemu/lockable.h"
#include "trace/trace-root.h"

28QemuMutex qemu_cpu_list_lock;29static QemuCond exclusive_cond;30static QemuCond exclusive_resume;31static QemuCond qemu_work_cond;32
33/* >= 1 if a thread is inside start_exclusive/end_exclusive. Written
34* under qemu_cpu_list_lock, read with atomic operations.
35*/
36static int pending_cpus;37
38void qemu_init_cpu_list(void)39{
40/* This is needed because qemu_init_cpu_list is also called by the41* child process in a fork. */
42pending_cpus = 0;43
44qemu_mutex_init(&qemu_cpu_list_lock);45qemu_cond_init(&exclusive_cond);46qemu_cond_init(&exclusive_resume);47qemu_cond_init(&qemu_work_cond);48}
49
50void cpu_list_lock(void)51{
52qemu_mutex_lock(&qemu_cpu_list_lock);53}
54
/* Release the global CPU list lock. */
void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

61int cpu_get_free_index(void)62{
63CPUState *some_cpu;64int max_cpu_index = 0;65
66CPU_FOREACH(some_cpu) {67if (some_cpu->cpu_index >= max_cpu_index) {68max_cpu_index = some_cpu->cpu_index + 1;69}70}71return max_cpu_index;72}
73
/* Tail queue of all CPUs; writers hold qemu_cpu_list_lock and use the
 * RCU-safe insert/remove variants. */
CPUTailQ cpus_queue = QTAILQ_HEAD_INITIALIZER(cpus_queue);
/* Bumped on every cpu_list_add/cpu_list_remove so callers can detect
 * that the CPU list changed. */
static unsigned int cpu_list_generation_id;
/* Return the current CPU-list generation id; it changes whenever a CPU
 * is added to or removed from the list. */
unsigned int cpu_list_generation_id_get(void)
{
    return cpu_list_generation_id;
}

82void cpu_list_add(CPUState *cpu)83{
84static bool cpu_index_auto_assigned;85
86QEMU_LOCK_GUARD(&qemu_cpu_list_lock);87if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {88cpu_index_auto_assigned = true;89cpu->cpu_index = cpu_get_free_index();90assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);91} else {92assert(!cpu_index_auto_assigned);93}94QTAILQ_INSERT_TAIL_RCU(&cpus_queue, cpu, node);95cpu_list_generation_id++;96}
97
98void cpu_list_remove(CPUState *cpu)99{
100QEMU_LOCK_GUARD(&qemu_cpu_list_lock);101if (!QTAILQ_IN_USE(cpu, node)) {102/* there is nothing to undo since cpu_exec_init() hasn't been called */103return;104}105
106QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);107cpu->cpu_index = UNASSIGNED_CPU_INDEX;108cpu_list_generation_id++;109}
110
111CPUState *qemu_get_cpu(int index)112{
113CPUState *cpu;114
115CPU_FOREACH(cpu) {116if (cpu->cpu_index == index) {117return cpu;118}119}120
121return NULL;122}
123
/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

/* One unit of deferred work to run on a CPU's thread. */
struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;  /* link in cpu->work_list */
    run_on_cpu_func func;                 /* callback to invoke */
    run_on_cpu_data data;                 /* opaque argument for func */
    /* free: heap-owned, freed by the processing side after running;
     * exclusive: run inside start_exclusive/end_exclusive;
     * done: completion flag, store-released for synchronous waiters. */
    bool free, exclusive, done;
};
134static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)135{
136qemu_mutex_lock(&cpu->work_mutex);137QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);138wi->done = false;139qemu_mutex_unlock(&cpu->work_mutex);140
141qemu_cpu_kick(cpu);142}
143
144void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,145QemuMutex *mutex)146{
147struct qemu_work_item wi;148
149if (qemu_cpu_is_self(cpu)) {150func(cpu, data);151return;152}153
154wi.func = func;155wi.data = data;156wi.done = false;157wi.free = false;158wi.exclusive = false;159
160queue_work_on_cpu(cpu, &wi);161while (!qatomic_load_acquire(&wi.done)) {162CPUState *self_cpu = current_cpu;163
164qemu_cond_wait(&qemu_work_cond, mutex);165current_cpu = self_cpu;166}167}
168
169void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)170{
171struct qemu_work_item *wi;172
173wi = g_new0(struct qemu_work_item, 1);174wi->func = func;175wi->data = data;176wi->free = true;177
178queue_work_on_cpu(cpu, wi);179}
180
/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held. */
static inline void exclusive_idle(void)
{
    /* Woken by end_exclusive()'s broadcast of exclusive_resume. */
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  Nesting is supported via
   current_cpu->exclusive_context_count: only the outermost call stops
   the other CPUs. */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    /* Already exclusive on this thread: just bump the nesting count. */
    if (current_cpu->exclusive_context_count) {
        current_cpu->exclusive_context_count++;
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing. */
    qatomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running. */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (qatomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    /* pending_cpus counts the running CPUs plus ourselves; each one
     * decrements it in cpu_exec_end and signals exclusive_cond. */
    qatomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->exclusive_context_count = 1;
}

232/* Finish an exclusive operation. */
233void end_exclusive(void)234{
235current_cpu->exclusive_context_count--;236if (current_cpu->exclusive_context_count) {237return;238}239
240qemu_mutex_lock(&qemu_cpu_list_lock);241qatomic_set(&pending_cpus, 0);242qemu_cond_broadcast(&exclusive_resume);243qemu_mutex_unlock(&qemu_cpu_list_lock);244}
245
/* Wait for exclusive ops to finish, and begin cpu execution.
 * Pairs with cpu_exec_end; coordinates with start_exclusive through
 * cpu->running, cpu->has_waiter and pending_cpus. */
void cpu_exec_start(CPUState *cpu)
{
    qatomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus. */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            qatomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            qatomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}

/* Mark cpu as not executing, and release pending exclusive ops.
 * Pairs with cpu_exec_start. */
void cpu_exec_end(CPUState *cpu)
{
    qatomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus. */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            qatomic_set(&pending_cpus, pending_cpus - 1);
            /* Last running CPU: wake the thread in start_exclusive. */
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}

321void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,322run_on_cpu_data data)323{
324struct qemu_work_item *wi;325
326wi = g_new0(struct qemu_work_item, 1);327wi->func = func;328wi->data = data;329wi->free = true;330wi->exclusive = true;331
332queue_work_on_cpu(cpu, wi);333}
334
335void free_queued_cpu_work(CPUState *cpu)336{
337while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {338struct qemu_work_item *wi = QSIMPLEQ_FIRST(&cpu->work_list);339QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);340if (wi->free) {341g_free(wi);342}343}344}
345
/* Drain and run every work item queued on @cpu, then wake all threads
 * blocked in do_run_on_cpu.  work_mutex is dropped around each callback
 * so the callback itself may queue more work. */
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            bql_unlock();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            bql_lock();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            /* Pairs with the load-acquire of wi->done in do_run_on_cpu. */
            qatomic_store_release(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}

385/* Add a breakpoint. */
386int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,387CPUBreakpoint **breakpoint)388{
389CPUClass *cc = CPU_GET_CLASS(cpu);390CPUBreakpoint *bp;391
392if (cc->gdb_adjust_breakpoint) {393pc = cc->gdb_adjust_breakpoint(cpu, pc);394}395
396bp = g_malloc(sizeof(*bp));397
398bp->pc = pc;399bp->flags = flags;400
401/* keep all GDB-injected breakpoints in front */402if (flags & BP_GDB) {403QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);404} else {405QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);406}407
408if (breakpoint) {409*breakpoint = bp;410}411
412trace_breakpoint_insert(cpu->cpu_index, pc, flags);413return 0;414}
415
416/* Remove a specific breakpoint. */
417int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)418{
419CPUClass *cc = CPU_GET_CLASS(cpu);420CPUBreakpoint *bp;421
422if (cc->gdb_adjust_breakpoint) {423pc = cc->gdb_adjust_breakpoint(cpu, pc);424}425
426QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {427if (bp->pc == pc && bp->flags == flags) {428cpu_breakpoint_remove_by_ref(cpu, bp);429return 0;430}431}432return -ENOENT;433}
434
/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    /* Unlink first; bp's fields stay valid for the trace call below. */
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}

444/* Remove all matching breakpoints. */
445void cpu_breakpoint_remove_all(CPUState *cpu, int mask)446{
447CPUBreakpoint *bp, *next;448
449QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {450if (bp->flags & mask) {451cpu_breakpoint_remove_by_ref(cpu, bp);452}453}454}
455