/* General "disassemble this chunk" code.  Used for debugging. */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "disas/bfd.h"
#include "elf.h"

#include "cpu.h"
#include "disas/disas.h"
#include "disas/capstone.h"

typedef struct CPUDebug {
    struct disassemble_info info;
    CPUState *cpu;
} CPUDebug;
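
/* The callbacks below receive only a disassemble_info pointer;
   container_of() (see target_read_memory) recovers the enclosing
   CPUDebug, and from it the CPUState.  */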

/* Filled in by elfload.c.  Simplistic, but will do for now. */
struct syminfo *syminfos = NULL;

/* Get LENGTH bytes from info's buffer, at target address memaddr.
   Transfer them to myaddr.  */
int
buffer_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
                   struct disassemble_info *info)
{
    if (memaddr < info->buffer_vma
        || memaddr + length > info->buffer_vma + info->buffer_length) {
        /* Out of bounds.  Use EIO because GDB uses it.  */
        return EIO;
    }
    memcpy(myaddr, info->buffer + (memaddr - info->buffer_vma), length);
    return 0;
}
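
/* Illustrative sketch (not part of QEMU proper): wiring a
   disassemble_info up to a flat byte buffer so that buffer_read_memory
   can service its reads.  The load address 0x1000 is an arbitrary
   example value.  */
static void G_GNUC_UNUSED example_buffer_setup(struct disassemble_info *info,
                                               bfd_byte *code, int size)
{
    info->buffer = code;                  /* bytes to disassemble */
    info->buffer_vma = 0x1000;            /* example load address */
    info->buffer_length = size;
    info->read_memory_func = buffer_read_memory;
}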

/* Get LENGTH bytes of guest memory at target address MEMADDR, using the
   CPU's debug access path.  Transfer them to MYADDR.  */
static int
target_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
                   struct disassemble_info *info)
{
    CPUDebug *s = container_of(info, CPUDebug, info);

    cpu_memory_rw_debug(s->cpu, memaddr, myaddr, length, 0);
    return 0;
}

/* Print an error message.  We can assume that this is in response to
   an error return from buffer_read_memory.  */
void
perror_memory(int status, bfd_vma memaddr, struct disassemble_info *info)
{
    if (status != EIO) {
        /* Can't happen.  */
        (*info->fprintf_func)(info->stream, "Unknown error %d\n", status);
    } else {
        /* Actually, an address between memaddr and memaddr + len was
           out of bounds.  */
        (*info->fprintf_func)(info->stream,
                              "Address 0x%" PRIx64 " is out of bounds.\n",
                              memaddr);
    }
}

/* This could be in a separate file, to save minuscule amounts of space
   in statically linked executables.  */

/* Just print the address in hex.  This is included for completeness even
   though both GDB and objdump provide their own (to print symbolic
   addresses).  */

void
generic_print_address (bfd_vma addr, struct disassemble_info *info)
{
    (*info->fprintf_func) (info->stream, "0x%" PRIx64, addr);
}

/* Print address in hex, truncated to the width of a host virtual address. */
static void
generic_print_host_address(bfd_vma addr, struct disassemble_info *info)
{
    uint64_t mask = ~0ULL >> (64 - (sizeof(void *) * 8));
    generic_print_address(addr & mask, info);
}
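
/* For example, on a 32-bit host sizeof(void *) == 4, so the mask above
   is 0xffffffff and a 64-bit bfd_vma is truncated to its low 32 bits;
   on a 64-bit host the mask is all-ones and the address is printed
   unchanged.  */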

/* Assume a symbol exists at every address: just return true.  */

int
generic_symbol_at_address (bfd_vma addr, struct disassemble_info *info)
{
  return 1;
}

bfd_vma bfd_getl64 (const bfd_byte *addr)
{
  unsigned long long v;

  v = (unsigned long long) addr[0];
  v |= (unsigned long long) addr[1] << 8;
  v |= (unsigned long long) addr[2] << 16;
  v |= (unsigned long long) addr[3] << 24;
  v |= (unsigned long long) addr[4] << 32;
  v |= (unsigned long long) addr[5] << 40;
  v |= (unsigned long long) addr[6] << 48;
  v |= (unsigned long long) addr[7] << 56;
  return (bfd_vma) v;
}

bfd_vma bfd_getl32 (const bfd_byte *addr)
{
  unsigned long v;

  v = (unsigned long) addr[0];
  v |= (unsigned long) addr[1] << 8;
  v |= (unsigned long) addr[2] << 16;
  v |= (unsigned long) addr[3] << 24;
  return (bfd_vma) v;
}

bfd_vma bfd_getb32 (const bfd_byte *addr)
{
  unsigned long v;

  v = (unsigned long) addr[0] << 24;
  v |= (unsigned long) addr[1] << 16;
  v |= (unsigned long) addr[2] << 8;
  v |= (unsigned long) addr[3];
  return (bfd_vma) v;
}

bfd_vma bfd_getl16 (const bfd_byte *addr)
{
  unsigned long v;

  v = (unsigned long) addr[0];
  v |= (unsigned long) addr[1] << 8;
  return (bfd_vma) v;
}

bfd_vma bfd_getb16 (const bfd_byte *addr)
{
  unsigned long v;

  v = (unsigned long) addr[0] << 8;
  v |= (unsigned long) addr[1];
  return (bfd_vma) v;
}
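
/* Illustrative self-check (not part of QEMU proper): with the byte
   sequence 12 34 56 78 9a bc de f0, the big-endian fetchers read the
   bytes in order and the little-endian ones reversed.  */
static void G_GNUC_UNUSED example_byte_order_fetch(void)
{
    static const bfd_byte buf[8] = {
        0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0
    };

    g_assert(bfd_getb16(buf) == 0x1234);
    g_assert(bfd_getl16(buf) == 0x3412);
    g_assert(bfd_getb32(buf) == 0x12345678);
    g_assert(bfd_getl32(buf) == 0x78563412);
    g_assert(bfd_getl64(buf) == 0xf0debc9a78563412ULL);
}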

static int print_insn_objdump(bfd_vma pc, disassemble_info *info,
                              const char *prefix)
{
    int i, n = info->buffer_length;
    uint8_t *buf = g_malloc(n);

    info->read_memory_func(pc, buf, n, info);

    for (i = 0; i < n; ++i) {
        if (i % 32 == 0) {
            info->fprintf_func(info->stream, "\n%s: ", prefix);
        }
        info->fprintf_func(info->stream, "%02x", buf[i]);
    }

    g_free(buf);
    return n;
}
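
/* The fallback output is a raw hex dump, one 32-byte row per line and
   no mnemonics, e.g. (made-up bytes):

       OBJD-T: 93070010130101ff...
 */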

static int print_insn_od_host(bfd_vma pc, disassemble_info *info)
{
    return print_insn_objdump(pc, info, "OBJD-H");
}

static int print_insn_od_target(bfd_vma pc, disassemble_info *info)
{
    return print_insn_objdump(pc, info, "OBJD-T");
}

#ifdef CONFIG_CAPSTONE
/* Temporary storage for the capstone library.  This will be alloced via
   malloc with a size private to the library; thus there's no reason not
   to share this across calls and across host vs target disassembly.  */
static __thread cs_insn *cap_insn;

/* Initialize the Capstone library.  */
/* ??? It would be nice to cache this.  We would need one handle for the
   host and one for the target.  For most targets we can reset specific
   parameters via cs_option(CS_OPT_MODE, new_mode), but we cannot change
   CS_ARCH_* in this way.  Thus we would need to be able to close and
   re-open the target handle with a different arch for the target in order
   to handle AArch64 vs AArch32 mode switching.  */
static cs_err cap_disas_start(disassemble_info *info, csh *handle)
{
    cs_mode cap_mode = info->cap_mode;
    cs_err err;

    cap_mode += (info->endian == BFD_ENDIAN_BIG ? CS_MODE_BIG_ENDIAN
                 : CS_MODE_LITTLE_ENDIAN);

    err = cs_open(info->cap_arch, cap_mode, handle);
    if (err != CS_ERR_OK) {
        return err;
    }

    /* ??? There probably ought to be a better place to put this.  */
    if (info->cap_arch == CS_ARCH_X86) {
        /* We don't care about errors (if for some reason the library
           is compiled without AT&T syntax); the user will just have
           to deal with the Intel syntax.  */
        cs_option(*handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT);
    }

    /* "Disassemble" unknown insns as ".byte W,X,Y,Z".  */
    cs_option(*handle, CS_OPT_SKIPDATA, CS_OPT_ON);

    /* Allocate temp space for cs_disasm_iter.  */
    if (cap_insn == NULL) {
        cap_insn = cs_malloc(*handle);
        if (cap_insn == NULL) {
            cs_close(handle);
            return CS_ERR_MEM;
        }
    }
    return CS_ERR_OK;
}

static void cap_dump_insn_units(disassemble_info *info, cs_insn *insn,
                                int i, int n)
{
    fprintf_function print = info->fprintf_func;
    FILE *stream = info->stream;

    switch (info->cap_insn_unit) {
    case 4:
        if (info->endian == BFD_ENDIAN_BIG) {
            for (; i < n; i += 4) {
                print(stream, " %08x", ldl_be_p(insn->bytes + i));
            }
        } else {
            for (; i < n; i += 4) {
                print(stream, " %08x", ldl_le_p(insn->bytes + i));
            }
        }
        break;

    case 2:
        if (info->endian == BFD_ENDIAN_BIG) {
            for (; i < n; i += 2) {
                print(stream, " %04x", lduw_be_p(insn->bytes + i));
            }
        } else {
            for (; i < n; i += 2) {
                print(stream, " %04x", lduw_le_p(insn->bytes + i));
            }
        }
        break;

    default:
        for (; i < n; i++) {
            print(stream, " %02x", insn->bytes[i]);
        }
        break;
    }
}
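
/* For example, a 4-byte insn is printed as one " 12345678" word when
   cap_insn_unit == 4, but as four " 12 34 56 78" bytes when the unit
   is 1; the unit reflects the ISA's natural instruction granularity.  */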

static void cap_dump_insn(disassemble_info *info, cs_insn *insn)
{
    fprintf_function print = info->fprintf_func;
    int i, n, split;

    print(info->stream, "0x%08" PRIx64 ": ", insn->address);

    n = insn->size;
    split = info->cap_insn_split;

    /* Dump the first SPLIT bytes of the instruction.  */
    cap_dump_insn_units(info, insn, 0, MIN(n, split));

    /* Add padding up to SPLIT so that mnemonics line up.  E.g. for a
       2-byte insn with cap_insn_unit == 2 and cap_insn_split == 4, one
       unit is missing, so we pad by 1 * (2 * 2 + 1) = 5 spaces, the
       width of one " %04x" group.  */
    if (n < split) {
        int width = (split - n) / info->cap_insn_unit;
        width *= (2 * info->cap_insn_unit + 1);
        print(info->stream, "%*s", width, "");
    }

    /* Print the actual instruction.  */
    print(info->stream, "  %-8s %s\n", insn->mnemonic, insn->op_str);

    /* Dump any remaining part of the insn on subsequent lines.  */
    for (i = split; i < n; i += split) {
        print(info->stream, "0x%08" PRIx64 ": ", insn->address + i);
        cap_dump_insn_units(info, insn, i, MIN(n, i + split));
        print(info->stream, "\n");
    }
}

/* Disassemble SIZE bytes at PC for the target.  */
static bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size)
{
    uint8_t cap_buf[1024];
    csh handle;
    cs_insn *insn;
    size_t csize = 0;

    if (cap_disas_start(info, &handle) != CS_ERR_OK) {
        return false;
    }
    insn = cap_insn;

    while (1) {
        size_t tsize = MIN(sizeof(cap_buf) - csize, size);
        const uint8_t *cbuf = cap_buf;

        target_read_memory(pc + csize, cap_buf + csize, tsize, info);
        csize += tsize;
        size -= tsize;

        while (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) {
            cap_dump_insn(info, insn);
        }

        /* If the target memory is not consumed, go back for more... */
        if (size != 0) {
            /* ... taking care to move any remaining fractional insn
               to the beginning of the buffer.  */
            if (csize != 0) {
                memmove(cap_buf, cbuf, csize);
            }
            continue;
        }

        /* Since the target memory is consumed, we should not have
           a remaining fractional insn.  */
        if (csize != 0) {
            (*info->fprintf_func)(info->stream,
                "Disassembler disagrees with translator "
                "over instruction decoding\n"
                "Please report this to qemu-devel@nongnu.org\n");
        }
        break;
    }

    cs_close(&handle);
    return true;
}

/* Disassemble SIZE bytes at CODE for the host.  */
static bool cap_disas_host(disassemble_info *info, void *code, size_t size)
{
    csh handle;
    const uint8_t *cbuf;
    cs_insn *insn;
    uint64_t pc;

    if (cap_disas_start(info, &handle) != CS_ERR_OK) {
        return false;
    }
    insn = cap_insn;

    cbuf = code;
    pc = (uintptr_t)code;

    while (cs_disasm_iter(handle, &cbuf, &size, &pc, insn)) {
        cap_dump_insn(info, insn);
    }
    if (size != 0) {
        (*info->fprintf_func)(info->stream,
            "Disassembler disagrees with TCG over instruction encoding\n"
            "Please report this to qemu-devel@nongnu.org\n");
    }

    cs_close(&handle);
    return true;
}

#if !defined(CONFIG_USER_ONLY)
/* Disassemble COUNT insns at PC for the target.  */
static bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count)
{
    uint8_t cap_buf[32];
    csh handle;
    cs_insn *insn;
    size_t csize = 0;

    if (cap_disas_start(info, &handle) != CS_ERR_OK) {
        return false;
    }
    insn = cap_insn;

    while (1) {
        /* We want to read memory for one insn, but generically we do not
           know how much memory that is.  We have a small buffer which is
           known to be sufficient for all supported targets.  Try to not
           read beyond the page, Just In Case.  For even more simplicity,
           ignore the actual target page size and use a 1k boundary.  If
           that turns out to be insufficient, we'll come back around the
           loop and read more.  */
        uint64_t epc = QEMU_ALIGN_UP(pc + csize + 1, 1024);
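        /* E.g. if pc + csize == 0x3fe, then epc == 0x400 and the
           upcoming read is capped at that 1 KiB boundary.  */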
        size_t tsize = MIN(sizeof(cap_buf) - csize, epc - pc);
        const uint8_t *cbuf = cap_buf;

        /* Make certain that we can make progress.  */
        assert(tsize != 0);
        info->read_memory_func(pc, cap_buf + csize, tsize, info);
        csize += tsize;

        if (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) {
            cap_dump_insn(info, insn);
            if (--count <= 0) {
                break;
            }
        }
        memmove(cap_buf, cbuf, csize);
    }

    cs_close(&handle);
    return true;
}
#endif /* !CONFIG_USER_ONLY */
#else
# define cap_disas_target(i, p, s)  false
# define cap_disas_host(i, p, s)  false
# define cap_disas_monitor(i, p, c)  false
#endif /* CONFIG_CAPSTONE */

/* Disassemble this for me please... (debugging).  */
void target_disas(FILE *out, CPUState *cpu, target_ulong code,
                  target_ulong size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    target_ulong pc;
    int count;
    CPUDebug s;

    INIT_DISASSEMBLE_INFO(s.info, out, fprintf);

    s.cpu = cpu;
    s.info.read_memory_func = target_read_memory;
    s.info.buffer_vma = code;
    s.info.buffer_length = size;
    s.info.print_address_func = generic_print_address;
    s.info.cap_arch = -1;
    s.info.cap_mode = 0;
    s.info.cap_insn_unit = 4;
    s.info.cap_insn_split = 4;

#ifdef TARGET_WORDS_BIGENDIAN
    s.info.endian = BFD_ENDIAN_BIG;
#else
    s.info.endian = BFD_ENDIAN_LITTLE;
#endif

    if (cc->disas_set_info) {
        cc->disas_set_info(cpu, &s.info);
    }

    if (s.info.cap_arch >= 0 && cap_disas_target(&s.info, code, size)) {
        return;
    }

    if (s.info.print_insn == NULL) {
        s.info.print_insn = print_insn_od_target;
    }

    for (pc = code; size > 0; pc += count, size -= count) {
        fprintf(out, "0x" TARGET_FMT_lx ":  ", pc);
        count = s.info.print_insn(pc, &s.info);
        fprintf(out, "\n");
        if (count < 0) {
            break;
        }
        if (size < count) {
            fprintf(out,
                    "Disassembler disagrees with translator over instruction "
                    "decoding\n"
                    "Please report this to qemu-devel@nongnu.org\n");
            break;
        }
    }
}
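
/* Illustrative sketch (hypothetical caller): the translator's debug
   logging could dump a guest translation block like so, where tb->pc
   and tb->size are assumed names for the block's bounds:

       if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
           target_disas(stderr, cpu, tb->pc, tb->size);
       }
 */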

/* Disassemble this for me please... (debugging). */
void disas(FILE *out, void *code, unsigned long size)
{
    uintptr_t pc;
    int count;
    CPUDebug s;
    int (*print_insn)(bfd_vma pc, disassemble_info *info) = NULL;

    INIT_DISASSEMBLE_INFO(s.info, out, fprintf);
    s.info.print_address_func = generic_print_host_address;

    s.info.buffer = code;
    s.info.buffer_vma = (uintptr_t)code;
    s.info.buffer_length = size;
    s.info.cap_arch = -1;
    s.info.cap_mode = 0;
    s.info.cap_insn_unit = 4;
    s.info.cap_insn_split = 4;

#ifdef HOST_WORDS_BIGENDIAN
    s.info.endian = BFD_ENDIAN_BIG;
#else
    s.info.endian = BFD_ENDIAN_LITTLE;
#endif
#if defined(CONFIG_TCG_INTERPRETER)
    print_insn = print_insn_tci;
#elif defined(__i386__)
    s.info.mach = bfd_mach_i386_i386;
    print_insn = print_insn_i386;
    s.info.cap_arch = CS_ARCH_X86;
    s.info.cap_mode = CS_MODE_32;
    s.info.cap_insn_unit = 1;
    s.info.cap_insn_split = 8;
#elif defined(__x86_64__)
    s.info.mach = bfd_mach_x86_64;
    print_insn = print_insn_i386;
    s.info.cap_arch = CS_ARCH_X86;
    s.info.cap_mode = CS_MODE_64;
    s.info.cap_insn_unit = 1;
    s.info.cap_insn_split = 8;
#elif defined(_ARCH_PPC)
    s.info.disassembler_options = (char *)"any";
    print_insn = print_insn_ppc;
    s.info.cap_arch = CS_ARCH_PPC;
# ifdef _ARCH_PPC64
    s.info.cap_mode = CS_MODE_64;
# endif
#elif defined(__riscv) && defined(CONFIG_RISCV_DIS)
#if defined(_ILP32) || (__riscv_xlen == 32)
    print_insn = print_insn_riscv32;
#elif defined(_LP64)
    print_insn = print_insn_riscv64;
#else
#error unsupported RISC-V ABI
#endif
#elif defined(__aarch64__) && defined(CONFIG_ARM_A64_DIS)
    print_insn = print_insn_arm_a64;
    s.info.cap_arch = CS_ARCH_ARM64;
#elif defined(__alpha__)
    print_insn = print_insn_alpha;
#elif defined(__sparc__)
    print_insn = print_insn_sparc;
    s.info.mach = bfd_mach_sparc_v9b;
#elif defined(__arm__)
    print_insn = print_insn_arm;
    s.info.cap_arch = CS_ARCH_ARM;
    /* TCG only generates code for arm mode.  */
#elif defined(__MIPSEB__)
    print_insn = print_insn_big_mips;
#elif defined(__MIPSEL__)
    print_insn = print_insn_little_mips;
#elif defined(__m68k__)
    print_insn = print_insn_m68k;
#elif defined(__s390__)
    print_insn = print_insn_s390;
#elif defined(__hppa__)
    print_insn = print_insn_hppa;
#endif

    if (s.info.cap_arch >= 0 && cap_disas_host(&s.info, code, size)) {
        return;
    }

    if (print_insn == NULL) {
        print_insn = print_insn_od_host;
    }
    for (pc = (uintptr_t)code; size > 0; pc += count, size -= count) {
        fprintf(out, "0x%08" PRIxPTR ":  ", pc);
        count = print_insn(pc, &s.info);
        fprintf(out, "\n");
        if (count < 0) {
            break;
        }
    }
}
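
/* Illustrative sketch (hypothetical caller): dumping freshly generated
   host code, roughly what TCG's debug path does; buf and buf_size are
   assumed names:

       disas(stderr, buf, buf_size);
 */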

/* Look up symbol for debugging purposes.  Returns "" if unknown.  */
const char *lookup_symbol(target_ulong orig_addr)
{
    const char *symbol = "";
    struct syminfo *s;

    for (s = syminfos; s; s = s->next) {
        symbol = s->lookup_symbol(s, orig_addr);
        if (symbol[0] != '\0') {
            break;
        }
    }

    return symbol;
}
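
/* Illustrative usage (hypothetical): annotate an address when printing,
   omitting the separator when no symbol is known:

       const char *sym = lookup_symbol(pc);
       fprintf(out, "0x" TARGET_FMT_lx "%s%s\n", pc,
               sym[0] ? " " : "", sym);
 */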

#if !defined(CONFIG_USER_ONLY)

#include "monitor/monitor.h"

static int
physical_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
                     struct disassemble_info *info)
{
    CPUDebug *s = container_of(info, CPUDebug, info);

    address_space_read(s->cpu->as, memaddr, MEMTXATTRS_UNSPECIFIED,
                       myaddr, length);
    return 0;
}

/* Disassembler for the monitor.  */
void monitor_disas(Monitor *mon, CPUState *cpu,
                   target_ulong pc, int nb_insn, int is_physical)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int count, i;
    CPUDebug s;

    INIT_DISASSEMBLE_INFO(s.info, (FILE *)mon, monitor_fprintf);

    s.cpu = cpu;
    s.info.read_memory_func
        = (is_physical ? physical_read_memory : target_read_memory);
    s.info.print_address_func = generic_print_address;
    s.info.buffer_vma = pc;
    s.info.cap_arch = -1;
    s.info.cap_mode = 0;
    s.info.cap_insn_unit = 4;
    s.info.cap_insn_split = 4;

#ifdef TARGET_WORDS_BIGENDIAN
    s.info.endian = BFD_ENDIAN_BIG;
#else
    s.info.endian = BFD_ENDIAN_LITTLE;
#endif

    if (cc->disas_set_info) {
        cc->disas_set_info(cpu, &s.info);
    }

    if (s.info.cap_arch >= 0 && cap_disas_monitor(&s.info, pc, nb_insn)) {
        return;
    }

    if (!s.info.print_insn) {
        monitor_printf(mon, "0x" TARGET_FMT_lx
                       ": Asm output not supported on this arch\n", pc);
        return;
    }

    for (i = 0; i < nb_insn; i++) {
        monitor_printf(mon, "0x" TARGET_FMT_lx ":  ", pc);
        count = s.info.print_insn(pc, &s.info);
        monitor_printf(mon, "\n");
        if (count < 0) {
            break;
        }
        pc += count;
    }
}
#endif
span class="hl opt">} static bool cpu_thread_is_idle(CPUState *cpu) { if (cpu->stop || cpu->queued_work_first) { return false; } if (cpu_is_stopped(cpu)) { return true; } if (!cpu->halted || cpu_has_work(cpu) || kvm_halt_in_kernel()) { return false; } return true; } static bool all_cpu_threads_idle(void) { CPUState *cpu; CPU_FOREACH(cpu) { if (!cpu_thread_is_idle(cpu)) { return false; } } return true; } /***********************************************************/ /* guest cycle counter */ /* Protected by TimersState seqlock */ static bool icount_sleep = true; /* Arbitrarily pick 1MIPS as the minimum allowable speed. */ #define MAX_ICOUNT_SHIFT 10 typedef struct TimersState { /* Protected by BQL. */ int64_t cpu_ticks_prev; int64_t cpu_ticks_offset; /* Protect fields that can be respectively read outside the * BQL, and written from multiple threads. */ QemuSeqLock vm_clock_seqlock; QemuSpin vm_clock_lock; int16_t cpu_ticks_enabled; /* Conversion factor from emulated instructions to virtual clock ticks. */ int16_t icount_time_shift; /* Compensate for varying guest execution speed. */ int64_t qemu_icount_bias; int64_t vm_clock_warp_start; int64_t cpu_clock_offset; /* Only written by TCG thread */ int64_t qemu_icount; /* for adjusting icount */ QEMUTimer *icount_rt_timer; QEMUTimer *icount_vm_timer; QEMUTimer *icount_warp_timer; } TimersState; static TimersState timers_state; bool mttcg_enabled; /* * We default to false if we know other options have been enabled * which are currently incompatible with MTTCG. Otherwise when each * guest (target) has been updated to support: * - atomic instructions * - memory ordering primitives (barriers) * they can set the appropriate CONFIG flags in ${target}-softmmu.mak * * Once a guest architecture has been converted to the new primitives * there are two remaining limitations to check. * * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host) * - The host must have a stronger memory order than the guest * * It may be possible in future to support strong guests on weak hosts * but that will require tagging all load/stores in a guest with their * implicit memory order requirements which would likely slow things * down a lot. 
*/ static bool check_tcg_memory_orders_compatible(void) { #if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO) return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0; #else return false; #endif } static bool default_mttcg_enabled(void) { if (use_icount || TCG_OVERSIZED_GUEST) { return false; } else { #ifdef TARGET_SUPPORTS_MTTCG return check_tcg_memory_orders_compatible(); #else return false; #endif } } void qemu_tcg_configure(QemuOpts *opts, Error **errp) { const char *t = qemu_opt_get(opts, "thread"); if (t) { if (strcmp(t, "multi") == 0) { if (TCG_OVERSIZED_GUEST) { error_setg(errp, "No MTTCG when guest word size > hosts"); } else if (use_icount) { error_setg(errp, "No MTTCG when icount is enabled"); } else { #ifndef TARGET_SUPPORTS_MTTCG warn_report("Guest not yet converted to MTTCG - " "you may get unexpected results"); #endif if (!check_tcg_memory_orders_compatible()) { warn_report("Guest expects a stronger memory ordering " "than the host provides"); error_printf("This may cause strange/hard to debug errors\n"); } mttcg_enabled = true; } } else if (strcmp(t, "single") == 0) { mttcg_enabled = false; } else { error_setg(errp, "Invalid 'thread' setting %s", t); } } else { mttcg_enabled = default_mttcg_enabled(); } } /* The current number of executed instructions is based on what we * originally budgeted minus the current state of the decrementing * icount counters in extra/u16.low. */ static int64_t cpu_get_icount_executed(CPUState *cpu) { return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra); } /* * Update the global shared timer_state.qemu_icount to take into * account executed instructions. This is done by the TCG vCPU * thread so the main-loop can see time has moved forward. */ static void cpu_update_icount_locked(CPUState *cpu) { int64_t executed = cpu_get_icount_executed(cpu); cpu->icount_budget -= executed; atomic_set_i64(&timers_state.qemu_icount, timers_state.qemu_icount + executed); } /* * Update the global shared timer_state.qemu_icount to take into * account executed instructions. This is done by the TCG vCPU * thread so the main-loop can see time has moved forward. */ void cpu_update_icount(CPUState *cpu) { seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); cpu_update_icount_locked(cpu); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); } static int64_t cpu_get_icount_raw_locked(void) { CPUState *cpu = current_cpu; if (cpu && cpu->running) { if (!cpu->can_do_io) { error_report("Bad icount read"); exit(1); } /* Take into account what has run */ cpu_update_icount_locked(cpu); } /* The read is protected by the seqlock, but needs atomic64 to avoid UB */ return atomic_read_i64(&timers_state.qemu_icount); } static int64_t cpu_get_icount_locked(void) { int64_t icount = cpu_get_icount_raw_locked(); return atomic_read_i64(&timers_state.qemu_icount_bias) + cpu_icount_to_ns(icount); } int64_t cpu_get_icount_raw(void) { int64_t icount; unsigned start; do { start = seqlock_read_begin(&timers_state.vm_clock_seqlock); icount = cpu_get_icount_raw_locked(); } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); return icount; } /* Return the virtual CPU time, based on the instruction counter. 
*/ int64_t cpu_get_icount(void) { int64_t icount; unsigned start; do { start = seqlock_read_begin(&timers_state.vm_clock_seqlock); icount = cpu_get_icount_locked(); } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); return icount; } int64_t cpu_icount_to_ns(int64_t icount) { return icount << atomic_read(&timers_state.icount_time_shift); } static int64_t cpu_get_ticks_locked(void) { int64_t ticks = timers_state.cpu_ticks_offset; if (timers_state.cpu_ticks_enabled) { ticks += cpu_get_host_ticks(); } if (timers_state.cpu_ticks_prev > ticks) { /* Non increasing ticks may happen if the host uses software suspend. */ timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks; ticks = timers_state.cpu_ticks_prev; } timers_state.cpu_ticks_prev = ticks; return ticks; } /* return the time elapsed in VM between vm_start and vm_stop. Unless * icount is active, cpu_get_ticks() uses units of the host CPU cycle * counter. */ int64_t cpu_get_ticks(void) { int64_t ticks; if (use_icount) { return cpu_get_icount(); } qemu_spin_lock(&timers_state.vm_clock_lock); ticks = cpu_get_ticks_locked(); qemu_spin_unlock(&timers_state.vm_clock_lock); return ticks; } static int64_t cpu_get_clock_locked(void) { int64_t time; time = timers_state.cpu_clock_offset; if (timers_state.cpu_ticks_enabled) { time += get_clock(); } return time; } /* Return the monotonic time elapsed in VM, i.e., * the time between vm_start and vm_stop */ int64_t cpu_get_clock(void) { int64_t ti; unsigned start; do { start = seqlock_read_begin(&timers_state.vm_clock_seqlock); ti = cpu_get_clock_locked(); } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); return ti; } /* enable cpu_get_ticks() * Caller must hold BQL which serves as mutex for vm_clock_seqlock. */ void cpu_enable_ticks(void) { seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); if (!timers_state.cpu_ticks_enabled) { timers_state.cpu_ticks_offset -= cpu_get_host_ticks(); timers_state.cpu_clock_offset -= get_clock(); timers_state.cpu_ticks_enabled = 1; } seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); } /* disable cpu_get_ticks() : the clock is stopped. You must not call * cpu_get_ticks() after that. * Caller must hold BQL which serves as mutex for vm_clock_seqlock. */ void cpu_disable_ticks(void) { seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); if (timers_state.cpu_ticks_enabled) { timers_state.cpu_ticks_offset += cpu_get_host_ticks(); timers_state.cpu_clock_offset = cpu_get_clock_locked(); timers_state.cpu_ticks_enabled = 0; } seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); } /* Correlation between real and virtual time is always going to be fairly approximate, so ignore small variation. When the guest is idle real and virtual time will be aligned in the IO wait loop. */ #define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10) static void icount_adjust(void) { int64_t cur_time; int64_t cur_icount; int64_t delta; /* Protected by TimersState mutex. */ static int64_t last_delta; /* If the VM is not running, then do nothing. */ if (!runstate_is_running()) { return; } seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); cur_time = cpu_get_clock_locked(); cur_icount = cpu_get_icount_locked(); delta = cur_icount - cur_time; /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. 
*/ if (delta > 0 && last_delta + ICOUNT_WOBBLE < delta * 2 && timers_state.icount_time_shift > 0) { /* The guest is getting too far ahead. Slow time down. */ atomic_set(&timers_state.icount_time_shift, timers_state.icount_time_shift - 1); } if (delta < 0 && last_delta - ICOUNT_WOBBLE > delta * 2 && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) { /* The guest is getting too far behind. Speed time up. */ atomic_set(&timers_state.icount_time_shift, timers_state.icount_time_shift + 1); } last_delta = delta; atomic_set_i64(&timers_state.qemu_icount_bias, cur_icount - (timers_state.qemu_icount << timers_state.icount_time_shift)); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); } static void icount_adjust_rt(void *opaque) { timer_mod(timers_state.icount_rt_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000); icount_adjust(); } static void icount_adjust_vm(void *opaque) { timer_mod(timers_state.icount_vm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + NANOSECONDS_PER_SECOND / 10); icount_adjust(); } static int64_t qemu_icount_round(int64_t count) { int shift = atomic_read(&timers_state.icount_time_shift); return (count + (1 << shift) - 1) >> shift; } static void icount_warp_rt(void) { unsigned seq; int64_t warp_start; /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start * changes from -1 to another value, so the race here is okay. */ do { seq = seqlock_read_begin(&timers_state.vm_clock_seqlock); warp_start = timers_state.vm_clock_warp_start; } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq)); if (warp_start == -1) { return; } seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); if (runstate_is_running()) { int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT, cpu_get_clock_locked()); int64_t warp_delta; warp_delta = clock - timers_state.vm_clock_warp_start; if (use_icount == 2) { /* * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too * far ahead of real time. */ int64_t cur_icount = cpu_get_icount_locked(); int64_t delta = clock - cur_icount; warp_delta = MIN(warp_delta, delta); } atomic_set_i64(&timers_state.qemu_icount_bias, timers_state.qemu_icount_bias + warp_delta); } timers_state.vm_clock_warp_start = -1; seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) { qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } } static void icount_timer_cb(void *opaque) { /* No need for a checkpoint because the timer already synchronizes * with CHECKPOINT_CLOCK_VIRTUAL_RT. 
*/ icount_warp_rt(); } void qtest_clock_warp(int64_t dest) { int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); AioContext *aio_context; assert(qtest_enabled()); aio_context = qemu_get_aio_context(); while (clock < dest) { int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); int64_t warp = qemu_soonest_timeout(dest - clock, deadline); seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); atomic_set_i64(&timers_state.qemu_icount_bias, timers_state.qemu_icount_bias + warp); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL); timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]); clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); } qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } void qemu_start_warp_timer(void) { int64_t clock; int64_t deadline; if (!use_icount) { return; } /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers * do not fire, so computing the deadline does not make sense. */ if (!runstate_is_running()) { return; } if (replay_mode != REPLAY_MODE_PLAY) { if (!all_cpu_threads_idle()) { return; } if (qtest_enabled()) { /* When testing, qtest commands advance icount. */ return; } replay_checkpoint(CHECKPOINT_CLOCK_WARP_START); } else { /* warp clock deterministically in record/replay mode */ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) { /* vCPU is sleeping and warp can't be started. It is probably a race condition: notification sent to vCPU was processed in advance and vCPU went to sleep. Therefore we have to wake it up for doing someting. */ if (replay_has_checkpoint()) { qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } return; } } /* We want to use the earliest deadline from ALL vm_clocks */ clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); if (deadline < 0) { static bool notified; if (!icount_sleep && !notified) { warn_report("icount sleep disabled and no active timers"); notified = true; } return; } if (deadline > 0) { /* * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to * sleep. Otherwise, the CPU might be waiting for a future timer * interrupt to wake it up, but the interrupt never comes because * the vCPU isn't running any insns and thus doesn't advance the * QEMU_CLOCK_VIRTUAL. */ if (!icount_sleep) { /* * We never let VCPUs sleep in no sleep icount mode. * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance * to the next QEMU_CLOCK_VIRTUAL event and notify it. * It is useful when we want a deterministic execution time, * isolated from host latencies. */ seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); atomic_set_i64(&timers_state.qemu_icount_bias, timers_state.qemu_icount_bias + deadline); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } else { /* * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some * "real" time, (related to the time left until the next event) has * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this. * This avoids that the warps are visible externally; for example, * you will not be sending network packets continuously instead of * every 100ms. 
*/ seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); if (timers_state.vm_clock_warp_start == -1 || timers_state.vm_clock_warp_start > clock) { timers_state.vm_clock_warp_start = clock; } seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); timer_mod_anticipate(timers_state.icount_warp_timer, clock + deadline); } } else if (deadline == 0) { qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } } static void qemu_account_warp_timer(void) { if (!use_icount || !icount_sleep) { return; } /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers * do not fire, so computing the deadline does not make sense. */ if (!runstate_is_running()) { return; } /* warp clock deterministically in record/replay mode */ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) { return; } timer_del(timers_state.icount_warp_timer); icount_warp_rt(); } static bool icount_state_needed(void *opaque) { return use_icount; } static bool warp_timer_state_needed(void *opaque) { TimersState *s = opaque; return s->icount_warp_timer != NULL; } static bool adjust_timers_state_needed(void *opaque) { TimersState *s = opaque; return s->icount_rt_timer != NULL; } /* * Subsection for warp timer migration is optional, because may not be created */ static const VMStateDescription icount_vmstate_warp_timer = { .name = "timer/icount/warp_timer", .version_id = 1, .minimum_version_id = 1, .needed = warp_timer_state_needed, .fields = (VMStateField[]) { VMSTATE_INT64(vm_clock_warp_start, TimersState), VMSTATE_TIMER_PTR(icount_warp_timer, TimersState), VMSTATE_END_OF_LIST() } }; static const VMStateDescription icount_vmstate_adjust_timers = { .name = "timer/icount/timers", .version_id = 1, .minimum_version_id = 1, .needed = adjust_timers_state_needed, .fields = (VMStateField[]) { VMSTATE_TIMER_PTR(icount_rt_timer, TimersState), VMSTATE_TIMER_PTR(icount_vm_timer, TimersState), VMSTATE_END_OF_LIST() } }; /* * This is a subsection for icount migration. 
*/ static const VMStateDescription icount_vmstate_timers = { .name = "timer/icount", .version_id = 1, .minimum_version_id = 1, .needed = icount_state_needed, .fields = (VMStateField[]) { VMSTATE_INT64(qemu_icount_bias, TimersState), VMSTATE_INT64(qemu_icount, TimersState), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription*[]) { &icount_vmstate_warp_timer, &icount_vmstate_adjust_timers, NULL } }; static const VMStateDescription vmstate_timers = { .name = "timer", .version_id = 2, .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_INT64(cpu_ticks_offset, TimersState), VMSTATE_UNUSED(8), VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription*[]) { &icount_vmstate_timers, NULL } }; static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) { double pct; double throttle_ratio; long sleeptime_ns; if (!cpu_throttle_get_percentage()) { return; } pct = (double)cpu_throttle_get_percentage()/100; throttle_ratio = pct / (1 - pct); sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS); qemu_mutex_unlock_iothread(); g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */ qemu_mutex_lock_iothread(); atomic_set(&cpu->throttle_thread_scheduled, 0); } static void cpu_throttle_timer_tick(void *opaque) { CPUState *cpu; double pct; /* Stop the timer if needed */ if (!cpu_throttle_get_percentage()) { return; } CPU_FOREACH(cpu) { if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) { async_run_on_cpu(cpu, cpu_throttle_thread, RUN_ON_CPU_NULL); } } pct = (double)cpu_throttle_get_percentage()/100; timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + CPU_THROTTLE_TIMESLICE_NS / (1-pct)); } void cpu_throttle_set(int new_throttle_pct) { /* Ensure throttle percentage is within valid range */ new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX); new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN); atomic_set(&throttle_percentage, new_throttle_pct); timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + CPU_THROTTLE_TIMESLICE_NS); } void cpu_throttle_stop(void) { atomic_set(&throttle_percentage, 0); } bool cpu_throttle_active(void) { return (cpu_throttle_get_percentage() != 0); } int cpu_throttle_get_percentage(void) { return atomic_read(&throttle_percentage); } void cpu_ticks_init(void) { seqlock_init(&timers_state.vm_clock_seqlock); qemu_spin_init(&timers_state.vm_clock_lock); vmstate_register(NULL, 0, &vmstate_timers, &timers_state); throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT, cpu_throttle_timer_tick, NULL); } void configure_icount(QemuOpts *opts, Error **errp) { const char *option; char *rem_str = NULL; option = qemu_opt_get(opts, "shift"); if (!option) { if (qemu_opt_get(opts, "align") != NULL) { error_setg(errp, "Please specify shift option when using align"); } return; } icount_sleep = qemu_opt_get_bool(opts, "sleep", true); if (icount_sleep) { timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT, icount_timer_cb, NULL); } icount_align_option = qemu_opt_get_bool(opts, "align", false); if (icount_align_option && !icount_sleep) { error_setg(errp, "align=on and sleep=off are incompatible"); } if (strcmp(option, "auto") != 0) { errno = 0; timers_state.icount_time_shift = strtol(option, &rem_str, 0); if (errno != 0 || *rem_str != '\0' || !strlen(option)) { error_setg(errp, "icount: Invalid shift value"); } use_icount = 1; return; } else if (icount_align_option) { error_setg(errp, "shift=auto and align=on are 
incompatible"); } else if (!icount_sleep) { error_setg(errp, "shift=auto and sleep=off are incompatible"); } use_icount = 2; /* 125MIPS seems a reasonable initial guess at the guest speed. It will be corrected fairly quickly anyway. */ timers_state.icount_time_shift = 3; /* Have both realtime and virtual time triggers for speed adjustment. The realtime trigger catches emulated time passing too slowly, the virtual time trigger catches emulated time passing too fast. Realtime triggers occur even when idle, so use them less frequently than VM triggers. */ timers_state.vm_clock_warp_start = -1; timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT, icount_adjust_rt, NULL); timer_mod(timers_state.icount_rt_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000); timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, icount_adjust_vm, NULL); timer_mod(timers_state.icount_vm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + NANOSECONDS_PER_SECOND / 10); } /***********************************************************/ /* TCG vCPU kick timer * * The kick timer is responsible for moving single threaded vCPU * emulation on to the next vCPU. If more than one vCPU is running a * timer event with force a cpu->exit so the next vCPU can get * scheduled. * * The timer is removed if all vCPUs are idle and restarted again once * idleness is complete. */ static QEMUTimer *tcg_kick_vcpu_timer; static CPUState *tcg_current_rr_cpu; #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10) static inline int64_t qemu_tcg_next_kick(void) { return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD; } /* Kick the currently round-robin scheduled vCPU */ static void qemu_cpu_kick_rr_cpu(void) { CPUState *cpu; do { cpu = atomic_mb_read(&tcg_current_rr_cpu); if (cpu) { cpu_exit(cpu); } } while (cpu != atomic_mb_read(&tcg_current_rr_cpu)); } static void do_nothing(CPUState *cpu, run_on_cpu_data unused) { } void qemu_timer_notify_cb(void *opaque, QEMUClockType type) { if (!use_icount || type != QEMU_CLOCK_VIRTUAL) { qemu_notify_event(); return; } if (qemu_in_vcpu_thread()) { /* A CPU is currently running; kick it back out to the * tcg_cpu_exec() loop so it will recalculate its * icount deadline immediately. */ qemu_cpu_kick(current_cpu); } else if (first_cpu) { /* qemu_cpu_kick is not enough to kick a halted CPU out of * qemu_tcg_wait_io_event. async_run_on_cpu, instead, * causes cpu_thread_is_idle to return false. This way, * handle_icount_deadline can run. * If we have no CPUs at all for some reason, we don't * need to do anything. */ async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL); } } static void kick_tcg_thread(void *opaque) { timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick()); qemu_cpu_kick_rr_cpu(); } static void start_tcg_kick_timer(void) { assert(!mttcg_enabled); if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) { tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kick_tcg_thread, NULL); } if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) { timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick()); } } static void stop_tcg_kick_timer(void) { assert(!mttcg_enabled); if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) { timer_del(tcg_kick_vcpu_timer); } } /***********************************************************/ void hw_error(const char *fmt, ...) 
{ va_list ap; CPUState *cpu; va_start(ap, fmt); fprintf(stderr, "qemu: hardware error: "); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); CPU_FOREACH(cpu) { fprintf(stderr, "CPU #%d:\n", cpu->cpu_index); cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU); } va_end(ap); abort(); } void cpu_synchronize_all_states(void) { CPUState *cpu; CPU_FOREACH(cpu) { cpu_synchronize_state(cpu); /* TODO: move to cpu_synchronize_state() */ if (hvf_enabled()) { hvf_cpu_synchronize_state(cpu); } } } void cpu_synchronize_all_post_reset(void) { CPUState *cpu; CPU_FOREACH(cpu) { cpu_synchronize_post_reset(cpu); /* TODO: move to cpu_synchronize_post_reset() */ if (hvf_enabled()) { hvf_cpu_synchronize_post_reset(cpu); } } } void cpu_synchronize_all_post_init(void) { CPUState *cpu; CPU_FOREACH(cpu) { cpu_synchronize_post_init(cpu); /* TODO: move to cpu_synchronize_post_init() */ if (hvf_enabled()) { hvf_cpu_synchronize_post_init(cpu); } } } void cpu_synchronize_all_pre_loadvm(void) { CPUState *cpu; CPU_FOREACH(cpu) { cpu_synchronize_pre_loadvm(cpu); } } static int do_vm_stop(RunState state, bool send_stop) { int ret = 0; if (runstate_is_running()) { cpu_disable_ticks(); pause_all_vcpus(); runstate_set(state); vm_state_notify(0, state); if (send_stop) { qapi_event_send_stop(); } } bdrv_drain_all(); replay_disable_events(); ret = bdrv_flush_all(); return ret; } /* Special vm_stop() variant for terminating the process. Historically clients * did not expect a QMP STOP event and so we need to retain compatibility. */ int vm_shutdown(void) { return do_vm_stop(RUN_STATE_SHUTDOWN, false); } static bool cpu_can_run(CPUState *cpu) { if (cpu->stop) { return false; } if (cpu_is_stopped(cpu)) { return false; } return true; } static void cpu_handle_guest_debug(CPUState *cpu) { gdb_set_stop_cpu(cpu); qemu_system_debug_request(); cpu->stopped = true; } #ifdef CONFIG_LINUX static void sigbus_reraise(void) { sigset_t set; struct sigaction action; memset(&action, 0, sizeof(action)); action.sa_handler = SIG_DFL; if (!sigaction(SIGBUS, &action, NULL)) { raise(SIGBUS); sigemptyset(&set); sigaddset(&set, SIGBUS); pthread_sigmask(SIG_UNBLOCK, &set, NULL); } perror("Failed to re-raise SIGBUS!\n"); abort(); } static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx) { if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) { sigbus_reraise(); } if (current_cpu) { /* Called asynchronously in VCPU thread. */ if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) { sigbus_reraise(); } } else { /* Called synchronously (via signalfd) in main thread. 
*/ if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) { sigbus_reraise(); } } } static void qemu_init_sigbus(void) { struct sigaction action; memset(&action, 0, sizeof(action)); action.sa_flags = SA_SIGINFO; action.sa_sigaction = sigbus_handler; sigaction(SIGBUS, &action, NULL); prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0); } #else /* !CONFIG_LINUX */ static void qemu_init_sigbus(void) { } #endif /* !CONFIG_LINUX */ static QemuMutex qemu_global_mutex; static QemuThread io_thread; /* cpu creation */ static QemuCond qemu_cpu_cond; /* system init */ static QemuCond qemu_pause_cond; void qemu_init_cpu_loop(void) { qemu_init_sigbus(); qemu_cond_init(&qemu_cpu_cond); qemu_cond_init(&qemu_pause_cond); qemu_mutex_init(&qemu_global_mutex); qemu_thread_get_self(&io_thread); } void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data) { do_run_on_cpu(cpu, func, data, &qemu_global_mutex); } static void qemu_kvm_destroy_vcpu(CPUState *cpu) { if (kvm_destroy_vcpu(cpu) < 0) { error_report("kvm_destroy_vcpu failed"); exit(EXIT_FAILURE); } } static void qemu_tcg_destroy_vcpu(CPUState *cpu) { } static void qemu_cpu_stop(CPUState *cpu, bool exit) { g_assert(qemu_cpu_is_self(cpu)); cpu->stop = false; cpu->stopped = true; if (exit) { cpu_exit(cpu); } qemu_cond_broadcast(&qemu_pause_cond); } static void qemu_wait_io_event_common(CPUState *cpu) { atomic_mb_set(&cpu->thread_kicked, false); if (cpu->stop) { qemu_cpu_stop(cpu, false); } process_queued_cpu_work(cpu); } static void qemu_tcg_rr_wait_io_event(void) { CPUState *cpu; while (all_cpu_threads_idle()) { stop_tcg_kick_timer(); qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex); } start_tcg_kick_timer(); CPU_FOREACH(cpu) { qemu_wait_io_event_common(cpu); } } static void qemu_wait_io_event(CPUState *cpu) { while (cpu_thread_is_idle(cpu)) { qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); } #ifdef _WIN32 /* Eat dummy APC queued by qemu_cpu_kick_thread. 
*/ if (!tcg_enabled()) { SleepEx(0, TRUE); } #endif qemu_wait_io_event_common(cpu); } static void *qemu_kvm_cpu_thread_fn(void *arg) { CPUState *cpu = arg; int r; rcu_register_thread(); qemu_mutex_lock_iothread(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); cpu->can_do_io = 1; current_cpu = cpu; r = kvm_init_vcpu(cpu); if (r < 0) { error_report("kvm_init_vcpu failed: %s", strerror(-r)); exit(1); } kvm_init_cpu_signals(cpu); /* signal CPU creation */ cpu->created = true; qemu_cond_signal(&qemu_cpu_cond); do { if (cpu_can_run(cpu)) { r = kvm_cpu_exec(cpu); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); } } qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); qemu_kvm_destroy_vcpu(cpu); cpu->created = false; qemu_cond_signal(&qemu_cpu_cond); qemu_mutex_unlock_iothread(); rcu_unregister_thread(); return NULL; } static void *qemu_dummy_cpu_thread_fn(void *arg) { #ifdef _WIN32 error_report("qtest is not supported under Windows"); exit(1); #else CPUState *cpu = arg; sigset_t waitset; int r; rcu_register_thread(); qemu_mutex_lock_iothread(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); cpu->can_do_io = 1; current_cpu = cpu; sigemptyset(&waitset); sigaddset(&waitset, SIG_IPI); /* signal CPU creation */ cpu->created = true; qemu_cond_signal(&qemu_cpu_cond); do { qemu_mutex_unlock_iothread(); do { int sig; r = sigwait(&waitset, &sig); } while (r == -1 && (errno == EAGAIN || errno == EINTR)); if (r == -1) { perror("sigwait"); exit(1); } qemu_mutex_lock_iothread(); qemu_wait_io_event(cpu); } while (!cpu->unplug); rcu_unregister_thread(); return NULL; #endif } static int64_t tcg_get_icount_limit(void) { int64_t deadline; if (replay_mode != REPLAY_MODE_PLAY) { deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); /* Maintain prior (possibly buggy) behaviour where if no deadline * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than * INT32_MAX nanoseconds ahead, we still use INT32_MAX * nanoseconds. */ if ((deadline < 0) || (deadline > INT32_MAX)) { deadline = INT32_MAX; } return qemu_icount_round(deadline); } else { return replay_get_instructions(); } } static void handle_icount_deadline(void) { assert(qemu_in_vcpu_thread()); if (use_icount) { int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); if (deadline == 0) { /* Wake up other AioContexts. */ qemu_clock_notify(QEMU_CLOCK_VIRTUAL); qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL); } } } static void prepare_icount_for_run(CPUState *cpu) { if (use_icount) { int insns_left; /* These should always be cleared by process_icount_data after * each vCPU execution. 
However u16.high can be raised * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt */ g_assert(cpu->icount_decr.u16.low == 0); g_assert(cpu->icount_extra == 0); cpu->icount_budget = tcg_get_icount_limit(); insns_left = MIN(0xffff, cpu->icount_budget); cpu->icount_decr.u16.low = insns_left; cpu->icount_extra = cpu->icount_budget - insns_left; replay_mutex_lock(); } } static void process_icount_data(CPUState *cpu) { if (use_icount) { /* Account for executed instructions */ cpu_update_icount(cpu); /* Reset the counters */ cpu->icount_decr.u16.low = 0; cpu->icount_extra = 0; cpu->icount_budget = 0; replay_account_executed_instructions(); replay_mutex_unlock(); } } static int tcg_cpu_exec(CPUState *cpu) { int ret; #ifdef CONFIG_PROFILER int64_t ti; #endif assert(tcg_enabled()); #ifdef CONFIG_PROFILER ti = profile_getclock(); #endif cpu_exec_start(cpu); ret = cpu_exec(cpu); cpu_exec_end(cpu); #ifdef CONFIG_PROFILER atomic_set(&tcg_ctx->prof.cpu_exec_time, tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti); #endif return ret; } /* Destroy any remaining vCPUs which have been unplugged and have * finished running */ static void deal_with_unplugged_cpus(void) { CPUState *cpu; CPU_FOREACH(cpu) { if (cpu->unplug && !cpu_can_run(cpu)) { qemu_tcg_destroy_vcpu(cpu); cpu->created = false; qemu_cond_signal(&qemu_cpu_cond); break; } } } /* Single-threaded TCG * * In the single-threaded case each vCPU is simulated in turn. If * there is more than a single vCPU we create a simple timer to kick * the vCPU and ensure we don't get stuck in a tight loop in one vCPU. * This is done explicitly rather than relying on side-effects * elsewhere. */ static void *qemu_tcg_rr_cpu_thread_fn(void *arg) { CPUState *cpu = arg; assert(tcg_enabled()); rcu_register_thread(); tcg_register_thread(); qemu_mutex_lock_iothread(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); cpu->created = true; cpu->can_do_io = 1; qemu_cond_signal(&qemu_cpu_cond); /* wait for initial kick-off after machine start */ while (first_cpu->stopped) { qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex); /* process any pending work */ CPU_FOREACH(cpu) { current_cpu = cpu; qemu_wait_io_event_common(cpu); } } start_tcg_kick_timer(); cpu = first_cpu; /* process any pending work */ cpu->exit_request = 1; while (1) { qemu_mutex_unlock_iothread(); replay_mutex_lock(); qemu_mutex_lock_iothread(); /* Account partial waits to QEMU_CLOCK_VIRTUAL. */ qemu_account_warp_timer(); /* Run the timers here. This is much more efficient than * waking up the I/O thread and waiting for completion. */ handle_icount_deadline(); replay_mutex_unlock(); if (!cpu) { cpu = first_cpu; } while (cpu && !cpu->queued_work_first && !cpu->exit_request) { atomic_mb_set(&tcg_current_rr_cpu, cpu); current_cpu = cpu; qemu_clock_enable(QEMU_CLOCK_VIRTUAL, (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); if (cpu_can_run(cpu)) { int r; qemu_mutex_unlock_iothread(); prepare_icount_for_run(cpu); r = tcg_cpu_exec(cpu); process_icount_data(cpu); qemu_mutex_lock_iothread(); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); break; } else if (r == EXCP_ATOMIC) { qemu_mutex_unlock_iothread(); cpu_exec_step_atomic(cpu); qemu_mutex_lock_iothread(); break; } } else if (cpu->stop) { if (cpu->unplug) { cpu = CPU_NEXT(cpu); } break; } cpu = CPU_NEXT(cpu); } /* while (cpu && !cpu->exit_request).. */ /* Does not need atomic_mb_set because a spurious wakeup is okay. 
*/ atomic_set(&tcg_current_rr_cpu, NULL); if (cpu && cpu->exit_request) { atomic_mb_set(&cpu->exit_request, 0); } if (use_icount && all_cpu_threads_idle()) { /* * When all cpus are sleeping (e.g in WFI), to avoid a deadlock * in the main_loop, wake it up in order to start the warp timer. */ qemu_notify_event(); } qemu_tcg_rr_wait_io_event(); deal_with_unplugged_cpus(); } rcu_unregister_thread(); return NULL; } static void *qemu_hax_cpu_thread_fn(void *arg) { CPUState *cpu = arg; int r; rcu_register_thread(); qemu_mutex_lock_iothread(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); cpu->created = true; cpu->halted = 0; current_cpu = cpu; hax_init_vcpu(cpu); qemu_cond_signal(&qemu_cpu_cond); do { if (cpu_can_run(cpu)) { r = hax_smp_cpu_exec(cpu); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); } } qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); rcu_unregister_thread(); return NULL; } /* The HVF-specific vCPU thread function. This one should only run when the host * CPU supports the VMX "unrestricted guest" feature. */ static void *qemu_hvf_cpu_thread_fn(void *arg) { CPUState *cpu = arg; int r; assert(hvf_enabled()); rcu_register_thread(); qemu_mutex_lock_iothread(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); cpu->can_do_io = 1; current_cpu = cpu; hvf_init_vcpu(cpu); /* signal CPU creation */ cpu->created = true; qemu_cond_signal(&qemu_cpu_cond); do { if (cpu_can_run(cpu)) { r = hvf_vcpu_exec(cpu); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); } } qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); hvf_vcpu_destroy(cpu); cpu->created = false; qemu_cond_signal(&qemu_cpu_cond); qemu_mutex_unlock_iothread(); rcu_unregister_thread(); return NULL; } static void *qemu_whpx_cpu_thread_fn(void *arg) { CPUState *cpu = arg; int r; rcu_register_thread(); qemu_mutex_lock_iothread(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); current_cpu = cpu; r = whpx_init_vcpu(cpu); if (r < 0) { fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r)); exit(1); } /* signal CPU creation */ cpu->created = true; qemu_cond_signal(&qemu_cpu_cond); do { if (cpu_can_run(cpu)) { r = whpx_vcpu_exec(cpu); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); } } while (cpu_thread_is_idle(cpu)) { qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); } qemu_wait_io_event_common(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); whpx_destroy_vcpu(cpu); cpu->created = false; qemu_cond_signal(&qemu_cpu_cond); qemu_mutex_unlock_iothread(); rcu_unregister_thread(); return NULL; } #ifdef _WIN32 static void CALLBACK dummy_apc_func(ULONG_PTR unused) { } #endif /* Multi-threaded TCG * * In the multi-threaded case each vCPU has its own thread. The TLS * variable current_cpu can be used deep in the code to find the * current CPUState for a given thread. 
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    g_assert(!use_icount);

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    current_cpu = cpu;
    qemu_cond_signal(&qemu_cpu_cond);

    /* process any pending work */
    cpu->exit_request = 1;

    do {
        if (cpu_can_run(cpu)) {
            int r;
            qemu_mutex_unlock_iothread();
            r = tcg_cpu_exec(cpu);
            qemu_mutex_lock_iothread();
            switch (r) {
            case EXCP_DEBUG:
                cpu_handle_guest_debug(cpu);
                break;
            case EXCP_HALTED:
                /* during start-up the vCPU is reset and the thread is
                 * kicked several times. If we don't ensure we go back
                 * to sleep in the halted state we won't cleanly
                 * start-up when the vCPU is enabled.
                 *
                 * cpu->halted should ensure we sleep in wait_io_event
                 */
                g_assert(cpu->halted);
                break;
            case EXCP_ATOMIC:
                qemu_mutex_unlock_iothread();
                cpu_exec_step_atomic(cpu);
                qemu_mutex_lock_iothread();
                /* fall through */
            default:
                /* Ignore everything else? */
                break;
            }
        }

        atomic_mb_set(&cpu->exit_request, 0);
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_tcg_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        if (whpx_enabled()) {
            whpx_vcpu_kick(cpu);
        } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
            fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
                    __func__, GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        cpu_exit(cpu);
        /* NOP unless doing single-thread RR */
        qemu_cpu_kick_rr_cpu();
    } else {
        if (hax_enabled()) {
            /*
             * FIXME: race condition with the exit_request check in
             * hax_vcpu_hax_exec
             */
            cpu->exit_request = 1;
        }
        qemu_cpu_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
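/* Added illustration (not part of the original file): code running
 * outside a vCPU thread is expected to bracket accesses to shared
 * machine state with the BQL, e.g.:
 *
 *     qemu_mutex_lock_iothread();
 *     ... touch device or CPU state ...
 *     qemu_mutex_unlock_iothread();
 *
 * qemu_mutex_lock_iothread() itself is a macro elsewhere in QEMU that
 * expands to the _impl variant below, passing __FILE__ and __LINE__
 * so the profiler can attribute contention to the caller.
 */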
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
    QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);

    g_assert(!qemu_mutex_iothread_locked());
    bql_lock(&qemu_global_mutex, file, line);
    iothread_locked = true;
}

void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}

static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }

    return true;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}

/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;
    static int tcg_region_inited;

    assert(tcg_enabled());
    /*
     * Initialize TCG regions--once. Now is a good time, because:
     * (1) TCG's init context, prologue and target globals have been set up.
     * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
     *     -accel flag is processed, so the check doesn't work then).
     */
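    /* Added note: vCPU creation runs under the BQL, so a plain static
     * flag appears to be enough to keep this one-time initialization
     * race-free.
     */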
    if (!tcg_region_inited) {
        tcg_region_inited = 1;
        tcg_region_init();
    }

    if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        if (qemu_tcg_mttcg_enabled()) {
            /* create a thread per vCPU with TCG (MTTCG) */
            parallel_cpus = true;
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                     cpu->cpu_index);

            qemu_thread_create(cpu->thread, thread_name,
                               qemu_tcg_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

        } else {
            /* share a single thread for all cpus with TCG */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
            qemu_thread_create(cpu->thread, thread_name,
                               qemu_tcg_rr_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

            single_tcg_halt_cond = cpu->halt_cond;
            single_tcg_cpu_thread = cpu->thread;
        }
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* For non-MTTCG cases we share the thread */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        cpu->created = true;
    }
}

static void qemu_hax_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void qemu_hvf_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /* HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode. */
    assert(hvf_enabled());

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void qemu_whpx_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
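        /* Added note: address space 0 is the plain system-memory view
         * ("cpu-memory"), backed by cpu->memory.
         */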
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (hax_enabled()) {
        qemu_hax_start_vcpu(cpu);
    } else if (hvf_enabled()) {
        qemu_hvf_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else if (whpx_enabled()) {
        qemu_whpx_start_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = true;
        cpu_exit(current_cpu);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}

/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(void)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending.  The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume();

    replay_enable_events();
    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}

void vm_start(void)
{
    if (!vm_prepare_start()) {
        resume_all_vcpus();
    }
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed.
         */
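        /* Added note: bdrv_flush_all() returns 0 on success or a
         * negative errno, so a flush failure from the earlier stop is
         * still reported to the caller here.
         */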
        return bdrv_flush_all();
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_RISCV)
        RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
        CPURISCVState *env = &riscv_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#elif defined(TARGET_S390X)
        S390CPU *s390_cpu = S390_CPU(cpu);
        CPUS390XState *env = &s390_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->arch = CPU_INFO_ARCH_X86;
        info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->arch = CPU_INFO_ARCH_PPC;
        info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->arch = CPU_INFO_ARCH_SPARC;
        info->value->u.q_sparc.pc = env->pc;
        info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->arch = CPU_INFO_ARCH_MIPS;
        info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->arch = CPU_INFO_ARCH_TRICORE;
        info->value->u.tricore.PC = env->PC;
#elif defined(TARGET_S390X)
        info->value->arch = CPU_INFO_ARCH_S390;
        info->value->u.s390.cpu_state = env->cpu_state;
#elif defined(TARGET_RISCV)
        info->value->arch = CPU_INFO_ARCH_RISCV;
        info->value->u.riscv.pc = env->pc;
#else
        info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
        info->value->has_props = !!mc->cpu_index_to_instance_props;
        if (info->value->has_props) {
            CpuInstanceProperties *props;
            props = g_malloc0(sizeof(*props));
            *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
            info->value->props = props;
        }

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
{
    /*
     * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
     * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
     */
    switch (target) {
    case SYS_EMU_TARGET_I386:
    case SYS_EMU_TARGET_X86_64:
        return CPU_INFO_ARCH_X86;

    case SYS_EMU_TARGET_PPC:
    case SYS_EMU_TARGET_PPC64:
        return CPU_INFO_ARCH_PPC;

    case SYS_EMU_TARGET_SPARC:
    case SYS_EMU_TARGET_SPARC64:
        return CPU_INFO_ARCH_SPARC;

    case SYS_EMU_TARGET_MIPS:
    case SYS_EMU_TARGET_MIPSEL:
    case SYS_EMU_TARGET_MIPS64:
    case SYS_EMU_TARGET_MIPS64EL:
        return CPU_INFO_ARCH_MIPS;

    case SYS_EMU_TARGET_TRICORE:
        return CPU_INFO_ARCH_TRICORE;

    case SYS_EMU_TARGET_S390X:
        return CPU_INFO_ARCH_S390;

    case SYS_EMU_TARGET_RISCV32:
    case SYS_EMU_TARGET_RISCV64:
        return CPU_INFO_ARCH_RISCV;

    default:
        return CPU_INFO_ARCH_OTHER;
    }
}

static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
{
#ifdef TARGET_S390X
    S390CPU *s390_cpu = S390_CPU(cpu);
    CPUS390XState *env = &s390_cpu->env;

    info->cpu_state = env->cpu_state;
#else
    abort();
#endif
}

/*
 * fast means: we NEVER interrupt vCPU threads to retrieve
 * information from KVM.
 */
CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    CpuInfoFastList *head = NULL, *cur_item = NULL;
    SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
                                          -1, &error_abort);
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoFastList *info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));

        info->value->cpu_index = cpu->cpu_index;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;

        info->value->has_props = !!mc->cpu_index_to_instance_props;
        if (info->value->has_props) {
            CpuInstanceProperties *props;
            props = g_malloc0(sizeof(*props));
            *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
            info->value->props = props;
        }

        info->value->arch = sysemu_target_to_cpuinfo_arch(target);
        info->value->target = target;
        if (target == SYS_EMU_TARGET_S390X) {
            cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
        }

        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

/* Added usage note -- example QMP invocation (values are illustrative):
 *   { "execute": "memsave",
 *     "arguments": { "val": 4096, "size": 100,
 *                    "filename": "/tmp/virtual-mem-dump" } }
 */
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
}

void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock  %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount()) / SCALE_MS);
ms\n", (cpu_get_clock() - cpu_get_icount())/SCALE_MS); if (icount_align_option) { cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS); cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS); } else { cpu_fprintf(f, "Max guest delay NA\n"); cpu_fprintf(f, "Max guest advance NA\n"); } }