summaryrefslogtreecommitdiffstats
path: root/hw/arm/smmuv3-internal.h
blob: bce161870f697961546912d37a436125e573c4e8 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
/*
 * ARM SMMUv3 support - Internal API
 *
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HW_ARM_SMMUV3_INTERNAL_H
#define HW_ARM_SMMUV3_INTERNAL_H

#include "hw/arm/smmu-common.h"

/* Possible overall outcomes of a translation request. */
typedef enum SMMUTranslationStatus {
    SMMU_TRANS_DISABLE,
    SMMU_TRANS_ABORT,
    SMMU_TRANS_BYPASS,
    SMMU_TRANS_ERROR,
    SMMU_TRANS_SUCCESS,
} SMMUTranslationStatus;

/* MMIO Registers */

/* ID registers: read-only feature/size information */
REG32(IDR0,                0x0)
    FIELD(IDR0, S1P,         1 , 1)
    FIELD(IDR0, TTF,         2 , 2)
    FIELD(IDR0, COHACC,      4 , 1)
    FIELD(IDR0, ASID16,      12, 1)
    FIELD(IDR0, TTENDIAN,    21, 2)
    FIELD(IDR0, STALL_MODEL, 24, 2)
    FIELD(IDR0, TERM_MODEL,  26, 1)
    FIELD(IDR0, STLEVEL,     27, 2)

REG32(IDR1,                0x4)
    FIELD(IDR1, SIDSIZE,      0 , 6)
    FIELD(IDR1, EVENTQS,      16, 5)
    FIELD(IDR1, CMDQS,        21, 5)

/*
 * Sizes used by this model: stream ID bits and log2 of the queue sizes
 * (presumably reported through the IDR1 fields above — confirm in smmuv3.c).
 */
#define SMMU_IDR1_SIDSIZE 16
#define SMMU_CMDQS   19
#define SMMU_EVENTQS 19

REG32(IDR2,                0x8)
REG32(IDR3,                0xc)
     FIELD(IDR3, HAD,         2, 1);
     FIELD(IDR3, RIL,        10, 1);
     FIELD(IDR3, BBML,       11, 2);
REG32(IDR4,                0x10)
REG32(IDR5,                0x14)
     FIELD(IDR5, OAS,         0, 3);
     FIELD(IDR5, GRAN4K,      4, 1);
     FIELD(IDR5, GRAN16K,     5, 1);
     FIELD(IDR5, GRAN64K,     6, 1);

/* OAS encoding used by this model: 4 => 44 output address bits (see oas2bits) */
#define SMMU_IDR5_OAS 4

REG32(IIDR,                0x18)
REG32(AIDR,                0x1c)
REG32(CR0,                 0x20)
    FIELD(CR0, SMMU_ENABLE,   0, 1)
    FIELD(CR0, EVENTQEN,      2, 1)
    FIELD(CR0, CMDQEN,        3, 1)

/* CR0 bits that are reserved in this model — confirm usage in smmuv3.c */
#define SMMU_CR0_RESERVED 0xFFFFFC20

REG32(CR0ACK,              0x24)
REG32(CR1,                 0x28)
REG32(CR2,                 0x2c)
REG32(STATUSR,             0x40)
REG32(IRQ_CTRL,            0x50)
    FIELD(IRQ_CTRL, GERROR_IRQEN,        0, 1)
    FIELD(IRQ_CTRL, PRI_IRQEN,           1, 1)
    FIELD(IRQ_CTRL, EVENTQ_IRQEN,        2, 1)

REG32(IRQ_CTRL_ACK,        0x54)
REG32(GERROR,              0x60)
    FIELD(GERROR, CMDQ_ERR,           0, 1)
    FIELD(GERROR, EVENTQ_ABT_ERR,     2, 1)
    FIELD(GERROR, PRIQ_ABT_ERR,       3, 1)
    FIELD(GERROR, MSI_CMDQ_ABT_ERR,   4, 1)
    FIELD(GERROR, MSI_EVENTQ_ABT_ERR, 5, 1)
    FIELD(GERROR, MSI_PRIQ_ABT_ERR,   6, 1)
    FIELD(GERROR, MSI_GERROR_ABT_ERR, 7, 1)
    FIELD(GERROR, MSI_SFM_ERR,        8, 1)

REG32(GERRORN,             0x64)

/* 64-bit registers: only the byte offset is defined here */
#define A_GERROR_IRQ_CFG0  0x68 /* 64b */
REG32(GERROR_IRQ_CFG1, 0x70)
REG32(GERROR_IRQ_CFG2, 0x74)

#define A_STRTAB_BASE      0x80 /* 64b */

/* ADDR field of the table/queue base registers: bits [51:6] */
#define SMMU_BASE_ADDR_MASK 0xfffffffffffc0

REG32(STRTAB_BASE_CFG,     0x88)
    FIELD(STRTAB_BASE_CFG, FMT,      16, 2)
    FIELD(STRTAB_BASE_CFG, SPLIT,    6 , 5)
    FIELD(STRTAB_BASE_CFG, LOG2SIZE, 0 , 6)

#define A_CMDQ_BASE        0x90 /* 64b */
REG32(CMDQ_PROD,           0x98)
REG32(CMDQ_CONS,           0x9c)
    FIELD(CMDQ_CONS, ERR, 24, 7)

#define A_EVENTQ_BASE      0xa0 /* 64b */
REG32(EVENTQ_PROD,         0xa8)
REG32(EVENTQ_CONS,         0xac)

#define A_EVENTQ_IRQ_CFG0  0xb0 /* 64b */
REG32(EVENTQ_IRQ_CFG1,     0xb8)
REG32(EVENTQ_IRQ_CFG2,     0xbc)

/* Offset of the first PrimeCell/CoreLink ID register (see smmuv3_idreg) */
#define A_IDREGS           0xfd0

/* Return the CR0 SMMU_ENABLE bit: non-zero when the SMMU is globally on. */
static inline int smmu_enabled(SMMUv3State *s)
{
    uint32_t cr0 = s->cr[0];

    return FIELD_EX32(cr0, CR0, SMMU_ENABLE);
}

/* Command Queue Entry: 4 x 32-bit words = 16 bytes */
typedef struct Cmd {
    uint32_t word[4];
} Cmd;

/* Event Queue Entry: 8 x 32-bit words = 32 bytes */
typedef struct Evt  {
    uint32_t word[8];
} Evt;

static inline uint32_t smmuv3_idreg(int regoffset)
{
    /*
     * Primecell/Corelink ID register values, indexed by register number
     * (byte offset from the first ID register divided by 4).  The table
     * encodes an ARM implementation of an MMU600 p1.
     */
    static const uint8_t smmuv3_ids[] = {
        0x04, 0, 0, 0, 0x84, 0xB4, 0xF0, 0x10, 0x0D, 0xF0, 0x05, 0xB1
    };

    return smmuv3_ids[regoffset / 4];
}

/* True when event queue interrupt generation is enabled in IRQ_CTRL. */
static inline bool smmuv3_eventq_irq_enabled(SMMUv3State *s)
{
    uint32_t irq_ctrl = s->irq_ctrl;

    return FIELD_EX32(irq_ctrl, IRQ_CTRL, EVENTQ_IRQEN);
}

/* True when global error interrupt generation is enabled in IRQ_CTRL. */
static inline bool smmuv3_gerror_irq_enabled(SMMUv3State *s)
{
    uint32_t irq_ctrl = s->irq_ctrl;

    return FIELD_EX32(irq_ctrl, IRQ_CTRL, GERROR_IRQEN);
}

/* Queue Handling */

/*
 * A queue pointer (PROD or CONS) holds an entry index in its low
 * log2size bits plus one wrap bit just above them; the wrap bits
 * differing while the indices match is what distinguishes a full
 * queue from an empty one (see smmuv3_q_full/smmuv3_q_empty).
 */
#define Q_BASE(q)          ((q)->base & SMMU_BASE_ADDR_MASK)
#define WRAP_MASK(q)       (1 << (q)->log2size)
#define INDEX_MASK(q)      (((1 << (q)->log2size)) - 1)
#define WRAP_INDEX_MASK(q) ((1 << ((q)->log2size + 1)) - 1)

/* entry index part of the consumer/producer pointers */
#define Q_CONS(q) ((q)->cons & INDEX_MASK(q))
#define Q_PROD(q) ((q)->prod & INDEX_MASK(q))

/* address of the entry currently pointed at */
#define Q_CONS_ENTRY(q)  (Q_BASE(q) + (q)->entry_size * Q_CONS(q))
#define Q_PROD_ENTRY(q)  (Q_BASE(q) + (q)->entry_size * Q_PROD(q))

/* wrap bit of the consumer/producer pointers */
#define Q_CONS_WRAP(q) (((q)->cons & WRAP_MASK(q)) >> (q)->log2size)
#define Q_PROD_WRAP(q) (((q)->prod & WRAP_MASK(q)) >> (q)->log2size)

/* Full: producer and consumer indices match but their wrap bits differ. */
static inline bool smmuv3_q_full(SMMUQueue *q)
{
    uint32_t diff = (q->cons ^ q->prod) & WRAP_INDEX_MASK(q);

    return diff == WRAP_MASK(q);
}

/* Empty: producer and consumer agree on both index and wrap bit. */
static inline bool smmuv3_q_empty(SMMUQueue *q)
{
    uint32_t mask = WRAP_INDEX_MASK(q);

    return (q->cons & mask) == (q->prod & mask);
}

/* Advance the producer pointer, letting index + wrap bit roll over together. */
static inline void queue_prod_incr(SMMUQueue *q)
{
    uint32_t next = q->prod + 1;

    q->prod = next & WRAP_INDEX_MASK(q);
}

static inline void queue_cons_incr(SMMUQueue *q)
{
    /*
     * Only the low log2size + 1 bits (index plus wrap bit) are
     * incremented; deposit32 leaves the upper bits of CONS untouched,
     * which preserves the ERR field held there.
     */
    uint32_t incremented = q->cons + 1;

    q->cons = deposit32(q->cons, 0, q->log2size + 1, incremented);
}

/* True when command queue processing is enabled in CR0. */
static inline bool smmuv3_cmdq_enabled(SMMUv3State *s)
{
    uint32_t cr0 = s->cr[0];

    return FIELD_EX32(cr0, CR0, CMDQEN);
}

/* True when event queue writes are enabled in CR0. */
static inline bool smmuv3_eventq_enabled(SMMUv3State *s)
{
    uint32_t cr0 = s->cr[0];

    return FIELD_EX32(cr0, CR0, EVENTQEN);
}

/* Store @err_type into the ERR field of the command queue CONS register. */
static inline void smmu_write_cmdq_err(SMMUv3State *s, uint32_t err_type)
{
    uint32_t cons = s->cmdq.cons;

    s->cmdq.cons = FIELD_DP32(cons, CMDQ_CONS, ERR, err_type);
}

/* Commands */

/*
 * Command queue opcodes (the CMD_TYPE field of a command); the explicit
 * values leave the same numbering gaps as the architectural encoding.
 */
typedef enum SMMUCommandType {
    SMMU_CMD_NONE            = 0x00,
    SMMU_CMD_PREFETCH_CONFIG       ,
    SMMU_CMD_PREFETCH_ADDR,
    SMMU_CMD_CFGI_STE,
    SMMU_CMD_CFGI_STE_RANGE,
    SMMU_CMD_CFGI_CD,
    SMMU_CMD_CFGI_CD_ALL,
    SMMU_CMD_CFGI_ALL,
    SMMU_CMD_TLBI_NH_ALL     = 0x10,
    SMMU_CMD_TLBI_NH_ASID,
    SMMU_CMD_TLBI_NH_VA,
    SMMU_CMD_TLBI_NH_VAA,
    SMMU_CMD_TLBI_EL3_ALL    = 0x18,
    SMMU_CMD_TLBI_EL3_VA     = 0x1a,
    SMMU_CMD_TLBI_EL2_ALL    = 0x20,
    SMMU_CMD_TLBI_EL2_ASID,
    SMMU_CMD_TLBI_EL2_VA,
    SMMU_CMD_TLBI_EL2_VAA,
    SMMU_CMD_TLBI_S12_VMALL  = 0x28,
    SMMU_CMD_TLBI_S2_IPA     = 0x2a,
    SMMU_CMD_TLBI_NSNH_ALL   = 0x30,
    SMMU_CMD_ATC_INV         = 0x40,
    SMMU_CMD_PRI_RESP,
    SMMU_CMD_RESUME          = 0x44,
    SMMU_CMD_STALL_TERM,
    SMMU_CMD_SYNC,
} SMMUCommandType;

/*
 * Opcode-to-name table for tracing; gaps in the opcode space are NULL
 * (handled by smmu_cmd_string).
 */
static const char *cmd_stringify[] = {
    [SMMU_CMD_PREFETCH_CONFIG] = "SMMU_CMD_PREFETCH_CONFIG",
    [SMMU_CMD_PREFETCH_ADDR]   = "SMMU_CMD_PREFETCH_ADDR",
    [SMMU_CMD_CFGI_STE]        = "SMMU_CMD_CFGI_STE",
    [SMMU_CMD_CFGI_STE_RANGE]  = "SMMU_CMD_CFGI_STE_RANGE",
    [SMMU_CMD_CFGI_CD]         = "SMMU_CMD_CFGI_CD",
    [SMMU_CMD_CFGI_CD_ALL]     = "SMMU_CMD_CFGI_CD_ALL",
    [SMMU_CMD_CFGI_ALL]        = "SMMU_CMD_CFGI_ALL",
    [SMMU_CMD_TLBI_NH_ALL]     = "SMMU_CMD_TLBI_NH_ALL",
    [SMMU_CMD_TLBI_NH_ASID]    = "SMMU_CMD_TLBI_NH_ASID",
    [SMMU_CMD_TLBI_NH_VA]      = "SMMU_CMD_TLBI_NH_VA",
    [SMMU_CMD_TLBI_NH_VAA]     = "SMMU_CMD_TLBI_NH_VAA",
    [SMMU_CMD_TLBI_EL3_ALL]    = "SMMU_CMD_TLBI_EL3_ALL",
    [SMMU_CMD_TLBI_EL3_VA]     = "SMMU_CMD_TLBI_EL3_VA",
    [SMMU_CMD_TLBI_EL2_ALL]    = "SMMU_CMD_TLBI_EL2_ALL",
    [SMMU_CMD_TLBI_EL2_ASID]   = "SMMU_CMD_TLBI_EL2_ASID",
    [SMMU_CMD_TLBI_EL2_VA]     = "SMMU_CMD_TLBI_EL2_VA",
    [SMMU_CMD_TLBI_EL2_VAA]    = "SMMU_CMD_TLBI_EL2_VAA",
    [SMMU_CMD_TLBI_S12_VMALL]  = "SMMU_CMD_TLBI_S12_VMALL",
    [SMMU_CMD_TLBI_S2_IPA]     = "SMMU_CMD_TLBI_S2_IPA",
    [SMMU_CMD_TLBI_NSNH_ALL]   = "SMMU_CMD_TLBI_NSNH_ALL",
    [SMMU_CMD_ATC_INV]         = "SMMU_CMD_ATC_INV",
    [SMMU_CMD_PRI_RESP]        = "SMMU_CMD_PRI_RESP",
    [SMMU_CMD_RESUME]          = "SMMU_CMD_RESUME",
    [SMMU_CMD_STALL_TERM]      = "SMMU_CMD_STALL_TERM",
    [SMMU_CMD_SYNC]            = "SMMU_CMD_SYNC",
};

/*
 * Name of a command opcode: "INVALID" for out-of-range values,
 * "UNKNOWN" for in-range opcodes with no table entry.
 */
static inline const char *smmu_cmd_string(SMMUCommandType type)
{
    if (type <= SMMU_CMD_NONE || type >= ARRAY_SIZE(cmd_stringify)) {
        return "INVALID";
    }
    return cmd_stringify[type] ? cmd_stringify[type] : "UNKNOWN";
}

/* CMDQ fields */

/* error codes written into CMDQ_CONS.ERR (see smmu_write_cmdq_err) */
typedef enum {
    SMMU_CERROR_NONE = 0,
    SMMU_CERROR_ILL,
    SMMU_CERROR_ABT,
    SMMU_CERROR_ATC_INV_SYNC,
} SMMUCmdError;

enum { /* Command completion notification */
    CMD_SYNC_SIG_NONE,
    CMD_SYNC_SIG_IRQ,
    CMD_SYNC_SIG_SEV,
};

/* Field extractors for a command entry (struct Cmd) */
#define CMD_TYPE(x)         extract32((x)->word[0], 0 , 8)
#define CMD_NUM(x)          extract32((x)->word[0], 12 , 5)
#define CMD_SCALE(x)        extract32((x)->word[0], 20 , 5)
#define CMD_SSEC(x)         extract32((x)->word[0], 10, 1)
#define CMD_SSV(x)          extract32((x)->word[0], 11, 1)
#define CMD_RESUME_AC(x)    extract32((x)->word[0], 12, 1)
#define CMD_RESUME_AB(x)    extract32((x)->word[0], 13, 1)
#define CMD_SYNC_CS(x)      extract32((x)->word[0], 12, 2)
#define CMD_SSID(x)         extract32((x)->word[0], 12, 20)
#define CMD_SID(x)          ((x)->word[1])
#define CMD_VMID(x)         extract32((x)->word[1], 0 , 16)
#define CMD_ASID(x)         extract32((x)->word[1], 16, 16)
#define CMD_RESUME_STAG(x)  extract32((x)->word[2], 0 , 16)
#define CMD_RESP(x)         extract32((x)->word[2], 11, 2)
#define CMD_LEAF(x)         extract32((x)->word[2], 0 , 1)
#define CMD_TTL(x)          extract32((x)->word[2], 8 , 2)
#define CMD_TG(x)           extract32((x)->word[2], 10, 2)
#define CMD_STE_RANGE(x)    extract32((x)->word[2], 0 , 5)
/*
 * 64-bit command address: word[3] supplies bits [63:32] and
 * word[2][31:12] supplies bits [31:12]; the low 12 bits are zero.
 */
#define CMD_ADDR(x) ({                                        \
            uint64_t high = (uint64_t)(x)->word[3];           \
            uint64_t low = extract32((x)->word[2], 12, 20);    \
            uint64_t addr = high << 32 | (low << 12);         \
            addr;                                             \
        })

/* feature flag: a 2-level stream table is in use */
#define SMMU_FEATURE_2LVL_STE (1 << 0)

/* Events */

/* Event record types written into the event queue (see EVT_SET_TYPE). */
typedef enum SMMUEventType {
    SMMU_EVT_NONE               = 0x00,
    SMMU_EVT_F_UUT                    ,
    SMMU_EVT_C_BAD_STREAMID           ,
    SMMU_EVT_F_STE_FETCH              ,
    SMMU_EVT_C_BAD_STE                ,
    SMMU_EVT_F_BAD_ATS_TREQ           ,
    SMMU_EVT_F_STREAM_DISABLED        ,
    SMMU_EVT_F_TRANS_FORBIDDEN        ,
    SMMU_EVT_C_BAD_SUBSTREAMID        ,
    SMMU_EVT_F_CD_FETCH               ,
    SMMU_EVT_C_BAD_CD                 ,
    SMMU_EVT_F_WALK_EABT              ,
    SMMU_EVT_F_TRANSLATION      = 0x10,
    SMMU_EVT_F_ADDR_SIZE              ,
    SMMU_EVT_F_ACCESS                 ,
    SMMU_EVT_F_PERMISSION             ,
    SMMU_EVT_F_TLB_CONFLICT     = 0x20,
    SMMU_EVT_F_CFG_CONFLICT           ,
    SMMU_EVT_E_PAGE_REQ         = 0x24,
} SMMUEventType;

/*
 * Event-type-to-name table for tracing; gaps in the value space are
 * NULL (handled by smmu_event_string).
 */
static const char *event_stringify[] = {
    [SMMU_EVT_NONE]                     = "no recorded event",
    [SMMU_EVT_F_UUT]                    = "SMMU_EVT_F_UUT",
    [SMMU_EVT_C_BAD_STREAMID]           = "SMMU_EVT_C_BAD_STREAMID",
    [SMMU_EVT_F_STE_FETCH]              = "SMMU_EVT_F_STE_FETCH",
    [SMMU_EVT_C_BAD_STE]                = "SMMU_EVT_C_BAD_STE",
    [SMMU_EVT_F_BAD_ATS_TREQ]           = "SMMU_EVT_F_BAD_ATS_TREQ",
    [SMMU_EVT_F_STREAM_DISABLED]        = "SMMU_EVT_F_STREAM_DISABLED",
    [SMMU_EVT_F_TRANS_FORBIDDEN]        = "SMMU_EVT_F_TRANS_FORBIDDEN",
    [SMMU_EVT_C_BAD_SUBSTREAMID]        = "SMMU_EVT_C_BAD_SUBSTREAMID",
    [SMMU_EVT_F_CD_FETCH]               = "SMMU_EVT_F_CD_FETCH",
    [SMMU_EVT_C_BAD_CD]                 = "SMMU_EVT_C_BAD_CD",
    [SMMU_EVT_F_WALK_EABT]              = "SMMU_EVT_F_WALK_EABT",
    [SMMU_EVT_F_TRANSLATION]            = "SMMU_EVT_F_TRANSLATION",
    [SMMU_EVT_F_ADDR_SIZE]              = "SMMU_EVT_F_ADDR_SIZE",
    [SMMU_EVT_F_ACCESS]                 = "SMMU_EVT_F_ACCESS",
    [SMMU_EVT_F_PERMISSION]             = "SMMU_EVT_F_PERMISSION",
    [SMMU_EVT_F_TLB_CONFLICT]           = "SMMU_EVT_F_TLB_CONFLICT",
    [SMMU_EVT_F_CFG_CONFLICT]           = "SMMU_EVT_F_CFG_CONFLICT",
    [SMMU_EVT_E_PAGE_REQ]               = "SMMU_EVT_E_PAGE_REQ",
};

/*
 * Name of an event type: "INVALID" for out-of-range values,
 * "UNKNOWN" for in-range values with no table entry.
 */
static inline const char *smmu_event_string(SMMUEventType type)
{
    const char *name;

    if (type >= ARRAY_SIZE(event_stringify)) {
        return "INVALID";
    }
    name = event_stringify[type];
    return name ? name : "UNKNOWN";
}

/*
 *  Encode an event record: the information needed to build an event
 *  queue entry, consumed by smmuv3_record_event().
 */
typedef struct SMMUEventInfo {
    SMMUEventType type;
    uint32_t sid;               /* StreamID the event is reported for */
    bool recorded;
    bool inval_ste_allowed;
    /* per-type payload: the member matching @type is the valid one */
    union {
        struct {
            uint32_t ssid;
            bool ssv;
            dma_addr_t addr;
            bool rnw;
            bool pnu;
            bool ind;
       } f_uut;
       struct SSIDInfo {
            uint32_t ssid;
            bool ssv;
       } c_bad_streamid;
       struct SSIDAddrInfo {
            uint32_t ssid;
            bool ssv;
            dma_addr_t addr;
       } f_ste_fetch;
       struct SSIDInfo c_bad_ste;
       struct {
            dma_addr_t addr;
            bool rnw;
       } f_transl_forbidden;
       struct {
            uint32_t ssid;
       } c_bad_substream;
       struct SSIDAddrInfo f_cd_fetch;
       struct SSIDInfo c_bad_cd;
       struct FullInfo {
            bool stall;
            uint16_t stag;
            uint32_t ssid;
            bool ssv;
            bool s2;
            dma_addr_t addr;
            bool rnw;
            bool pnu;
            bool ind;
            uint8_t class;
            dma_addr_t addr2;
       } f_walk_eabt;
       struct FullInfo f_translation;
       struct FullInfo f_addr_size;
       struct FullInfo f_access;
       struct FullInfo f_permission;
       struct SSIDInfo f_cfg_conflict;
       /**
        * not supported yet:
        * F_BAD_ATS_TREQ
        * F_BAD_ATS_TREQ
        * F_TLB_CONFLICT
        * E_PAGE_REQUEST
        * IMPDEF_EVENTn
        */
    } u;
} SMMUEventInfo;

/* EVTQ fields */

/* queue overflow flag (bit 31) — presumably set in EVENTQ_PROD; confirm in smmuv3.c */
#define EVT_Q_OVERFLOW        (1 << 31)

/* Setters building an event record (struct Evt) field by field */
#define EVT_SET_TYPE(x, v)  ((x)->word[0] = deposit32((x)->word[0], 0 , 8 , v))
#define EVT_SET_SSV(x, v)   ((x)->word[0] = deposit32((x)->word[0], 11, 1 , v))
#define EVT_SET_SSID(x, v)  ((x)->word[0] = deposit32((x)->word[0], 12, 20, v))
#define EVT_SET_SID(x, v)   ((x)->word[1] = v)
#define EVT_SET_STAG(x, v)  ((x)->word[2] = deposit32((x)->word[2], 0 , 16, v))
#define EVT_SET_STALL(x, v) ((x)->word[2] = deposit32((x)->word[2], 31, 1 , v))
#define EVT_SET_PNU(x, v)   ((x)->word[3] = deposit32((x)->word[3], 1 , 1 , v))
#define EVT_SET_IND(x, v)   ((x)->word[3] = deposit32((x)->word[3], 2 , 1 , v))
#define EVT_SET_RNW(x, v)   ((x)->word[3] = deposit32((x)->word[3], 3 , 1 , v))
#define EVT_SET_S2(x, v)    ((x)->word[3] = deposit32((x)->word[3], 7 , 1 , v))
#define EVT_SET_CLASS(x, v) ((x)->word[3] = deposit32((x)->word[3], 8 , 2 , v))
/* 64-bit addresses are split across two consecutive words, low word first */
#define EVT_SET_ADDR(x, addr)                             \
    do {                                                  \
            (x)->word[5] = (uint32_t)(addr >> 32);        \
            (x)->word[4] = (uint32_t)(addr & 0xffffffff); \
    } while (0)
#define EVT_SET_ADDR2(x, addr)                            \
    do {                                                  \
            (x)->word[7] = (uint32_t)(addr >> 32);        \
            (x)->word[6] = (uint32_t)(addr & 0xffffffff); \
    } while (0)

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *event);

/* Configuration Data */

/* STE Level 1 Descriptor: 2 words = 8 bytes */
typedef struct STEDesc {
    uint32_t word[2];
} STEDesc;

/* CD Level 1 Descriptor: 2 words = 8 bytes */
typedef struct CDDesc {
    uint32_t word[2];
} CDDesc;

/* Stream Table Entry(STE): 16 words = 64 bytes */
typedef struct STE {
    uint32_t word[16];
} STE;

/* Context Descriptor(CD): 16 words = 64 bytes */
typedef struct CD {
    uint32_t word[16];
} CD;

/* STE fields */

#define STE_VALID(x)   extract32((x)->word[0], 0, 1)

/* CONFIG field: bit0 = stage 1 enabled, bit1 = stage 2 enabled, bit2 clear = abort */
#define STE_CONFIG(x)  extract32((x)->word[0], 1, 3)
#define STE_CFG_S1_ENABLED(config) (config & 0x1)
#define STE_CFG_S2_ENABLED(config) (config & 0x2)
#define STE_CFG_ABORT(config)      (!(config & 0x4))
#define STE_CFG_BYPASS(config)     (config == 0x4)

#define STE_S1FMT(x)       extract32((x)->word[0], 4 , 2)
#define STE_S1CDMAX(x)     extract32((x)->word[1], 27, 5)
#define STE_S1STALLD(x)    extract32((x)->word[2], 27, 1)
#define STE_EATS(x)        extract32((x)->word[2], 28, 2)
#define STE_STRW(x)        extract32((x)->word[2], 30, 2)
#define STE_S2VMID(x)      extract32((x)->word[4], 0 , 16)
#define STE_S2T0SZ(x)      extract32((x)->word[5], 0 , 6)
#define STE_S2SL0(x)       extract32((x)->word[5], 6 , 2)
#define STE_S2TG(x)        extract32((x)->word[5], 14, 2)
#define STE_S2PS(x)        extract32((x)->word[5], 16, 3)
#define STE_S2AA64(x)      extract32((x)->word[5], 19, 1)
#define STE_S2HD(x)        extract32((x)->word[5], 24, 1)
#define STE_S2HA(x)        extract32((x)->word[5], 25, 1)
#define STE_S2S(x)         extract32((x)->word[5], 26, 1)
/*
 * Context descriptor pointer: word[1][15:0] are address bits [47:32],
 * word[0][31:6] are address bits [31:6].  The accumulator must be
 * uint64_t, not "unsigned long": unsigned long is 32 bits on some
 * hosts (32-bit targets and 64-bit Windows) and would truncate the
 * address.  The 48 assembled bits are sufficient since SMMU_IDR5_OAS
 * caps the output address size at 44 bits.
 */
#define STE_CTXPTR(x)                                           \
    ({                                                          \
        uint64_t addr;                                          \
        addr = (uint64_t)extract32((x)->word[1], 0, 16) << 32;  \
        addr |= (uint64_t)((x)->word[0] & 0xffffffc0);          \
        addr;                                                   \
    })

/* Stage-2 translation table base; same uint64_t rationale as STE_CTXPTR */
#define STE_S2TTB(x)                                            \
    ({                                                          \
        uint64_t addr;                                          \
        addr = (uint64_t)extract32((x)->word[7], 0, 16) << 32;  \
        addr |= (uint64_t)((x)->word[6] & 0xfffffff0);          \
        addr;                                                   \
    })

/*
 * Decode the 3-bit OAS (output address size) field into a width in
 * bits; reserved encodings (> 5) yield -1.
 */
static inline int oas2bits(int oas_field)
{
    static const int oas_bits[] = { 32, 36, 40, 42, 44, 48 };
    int nb_encodings = sizeof(oas_bits) / sizeof(oas_bits[0]);

    if (oas_field < 0 || oas_field >= nb_encodings) {
        return -1;
    }
    return oas_bits[oas_field];
}

/*
 * Output address size, in bits, for the stage-2 context of @ste.
 * AArch32 stage-2 (S2AA64 clear) is fixed at 40 bits; otherwise the
 * STE's S2PS field is used, clamped to the size this model supports.
 */
static inline int pa_range(STE *ste)
{
    int oas_field;

    if (!STE_S2AA64(ste)) {
        return 40;
    }

    oas_field = MIN(STE_S2PS(ste), SMMU_IDR5_OAS);
    return oas2bits(oas_field);
}

/*
 * Largest representable output address for @ste.  The shifted 1 must be
 * 64-bit: pa_range() can return up to 44 (40 for AArch32 stage-2), and
 * left-shifting a 32-bit int by >= 31 is undefined behavior.
 */
#define MAX_PA(ste) ((1ULL << pa_range(ste)) - 1)

/* CD fields */

#define CD_VALID(x)   extract32((x)->word[0], 31, 1)
#define CD_ASID(x)    extract32((x)->word[1], 16, 16)
/*
 * TTBx base for ttbr @sel (0 or 1): bits [18:0] of the odd word hold
 * address bits [50:32]; the low 4 bits of the even word are masked off.
 */
#define CD_TTB(x, sel)                                      \
    ({                                                      \
        uint64_t hi, lo;                                    \
        hi = extract32((x)->word[(sel) * 2 + 3], 0, 19);    \
        hi <<= 32;                                          \
        lo = (x)->word[(sel) * 2 + 2] & ~0xfULL;            \
        hi | lo;                                            \
    })
#define CD_HAD(x, sel)   extract32((x)->word[(sel) * 2 + 2], 1, 1)

/* per-ttbr fields: TT0 in word[0][15:0], TT1 in word[0][31:16] */
#define CD_TSZ(x, sel)   extract32((x)->word[0], (16 * (sel)) + 0, 6)
#define CD_TG(x, sel)    extract32((x)->word[0], (16 * (sel)) + 6, 2)
#define CD_EPD(x, sel)   extract32((x)->word[0], (16 * (sel)) + 14, 1)
#define CD_ENDI(x)       extract32((x)->word[0], 15, 1)
#define CD_IPS(x)        extract32((x)->word[1], 0 , 3)
#define CD_TBI(x)        extract32((x)->word[1], 6 , 2)
#define CD_HD(x)         extract32((x)->word[1], 10 , 1)
#define CD_HA(x)         extract32((x)->word[1], 11 , 1)
#define CD_S(x)          extract32((x)->word[1], 12, 1)
#define CD_R(x)          extract32((x)->word[1], 13, 1)
#define CD_A(x)          extract32((x)->word[1], 14, 1)
#define CD_AARCH64(x)    extract32((x)->word[1], 9 , 1)

/**
 * tg2granule - Decodes the CD translation granule size field according
 * to the ttbr in use
 * @bits: TG0/1 fields
 * @ttbr: ttbr index in use
 *
 * Returns log2 of the granule size in bytes (12, 14 or 16), or 0 for
 * an encoding invalid for that ttbr; TG0 and TG1 use different
 * encodings of the same three granule sizes.
 */
static inline int tg2granule(int bits, int ttbr)
{
    if (ttbr) {
        switch (bits) {
        case 1:
            return 14;
        case 2:
            return 12;
        case 3:
            return 16;
        default:
            return 0;
        }
    }
    switch (bits) {
    case 0:
        return 12;
    case 1:
        return 16;
    case 2:
        return 14;
    default:
        return 0;
    }
}

/*
 * Return the level-2 table pointer held in a level-1 stream table
 * descriptor; the low 5 bits of word[0] (the SPAN field, see
 * L1STD_SPAN below) are masked off.
 */
static inline uint64_t l1std_l2ptr(STEDesc *desc)
{
    uint64_t hi = desc->word[1];
    uint64_t lo = desc->word[0] & ~0x1fULL;

    return (hi << 32) | lo;
}

#define L1STD_SPAN(stm) (extract32((stm)->word[0], 0, 5))

#endif
>load_reg(DisasContext *s, int reg) { TCGv tmp = new_tmp(); load_reg_var(s, tmp, reg); return tmp; } /* Set a CPU register. The source must be a temporary and will be marked as dead. */ static void store_reg(DisasContext *s, int reg, TCGv var) { if (reg == 31) { tcg_gen_andi_i32(var, var, ~3); s->is_jmp = DISAS_JUMP; } tcg_gen_mov_i32(cpu_R[reg], var); dead_tmp(var); } /* Value extensions. */ #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var) #define gen_uxth(var) tcg_gen_ext16u_i32(var, var) #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var) #define gen_sxth(var) tcg_gen_ext16s_i32(var, var) #define UCOP_REG_M (((insn) >> 0) & 0x1f) #define UCOP_REG_N (((insn) >> 19) & 0x1f) #define UCOP_REG_D (((insn) >> 14) & 0x1f) #define UCOP_REG_S (((insn) >> 9) & 0x1f) #define UCOP_REG_LO (((insn) >> 14) & 0x1f) #define UCOP_REG_HI (((insn) >> 9) & 0x1f) #define UCOP_SH_OP (((insn) >> 6) & 0x03) #define UCOP_SH_IM (((insn) >> 9) & 0x1f) #define UCOP_OPCODES (((insn) >> 25) & 0x0f) #define UCOP_IMM_9 (((insn) >> 0) & 0x1ff) #define UCOP_IMM10 (((insn) >> 0) & 0x3ff) #define UCOP_IMM14 (((insn) >> 0) & 0x3fff) #define UCOP_COND (((insn) >> 25) & 0x0f) #define UCOP_CMOV_COND (((insn) >> 19) & 0x0f) #define UCOP_CPNUM (((insn) >> 10) & 0x0f) #define UCOP_UCF64_FMT (((insn) >> 24) & 0x03) #define UCOP_UCF64_FUNC (((insn) >> 6) & 0x0f) #define UCOP_UCF64_COND (((insn) >> 6) & 0x0f) #define UCOP_SET(i) ((insn) & (1 << (i))) #define UCOP_SET_P UCOP_SET(28) #define UCOP_SET_U UCOP_SET(27) #define UCOP_SET_B UCOP_SET(26) #define UCOP_SET_W UCOP_SET(25) #define UCOP_SET_L UCOP_SET(24) #define UCOP_SET_S UCOP_SET(24) #define ILLEGAL cpu_abort(env_cpu(env), \ "Illegal UniCore32 instruction %x at line %d!", \ insn, __LINE__) #ifndef CONFIG_USER_ONLY static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { TCGv tmp, tmp2, tmp3; if ((insn & 0xfe000000) == 0xe0000000) { tmp2 = new_tmp(); tmp3 = new_tmp(); tcg_gen_movi_i32(tmp2, UCOP_REG_N); tcg_gen_movi_i32(tmp3, 
UCOP_IMM10); if (UCOP_SET_L) { tmp = new_tmp(); gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3); store_reg(s, UCOP_REG_D, tmp); } else { tmp = load_reg(s, UCOP_REG_D); gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3); dead_tmp(tmp); } dead_tmp(tmp2); dead_tmp(tmp3); return; } ILLEGAL; } static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { TCGv tmp; if ((insn & 0xff003fff) == 0xe1000400) { /* * movc rd, pp.nn, #imm9 * rd: UCOP_REG_D * nn: UCOP_REG_N (must be 0) * imm9: 0 */ if (UCOP_REG_N == 0) { tmp = new_tmp(); tcg_gen_movi_i32(tmp, 0); store_reg(s, UCOP_REG_D, tmp); return; } else { ILLEGAL; } } if ((insn & 0xff003fff) == 0xe0000401) { /* * movc pp.nn, rn, #imm9 * rn: UCOP_REG_D * nn: UCOP_REG_N (must be 1) * imm9: 1 */ if (UCOP_REG_N == 1) { tmp = load_reg(s, UCOP_REG_D); gen_helper_cp1_putc(tmp); dead_tmp(tmp); return; } else { ILLEGAL; } } ILLEGAL; } #endif static inline void gen_set_asr(TCGv var, uint32_t mask) { TCGv tmp_mask = tcg_const_i32(mask); gen_helper_asr_write(cpu_env, var, tmp_mask); tcg_temp_free_i32(tmp_mask); } /* Set NZCV flags from the high 4 bits of var. */ #define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV) static void gen_exception(int excp) { TCGv tmp = new_tmp(); tcg_gen_movi_i32(tmp, excp); gen_helper_exception(cpu_env, tmp); dead_tmp(tmp); } #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF)) /* Set CF to the top bit of var. */ static void gen_set_CF_bit31(TCGv var) { TCGv tmp = new_tmp(); tcg_gen_shri_i32(tmp, var, 31); gen_set_CF(tmp); dead_tmp(tmp); } /* Set N and Z flags from var. */ static inline void gen_logic_CC(TCGv var) { tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF)); tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF)); } /* dest = T0 + T1 + CF. 
*/ static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1) { TCGv tmp; tcg_gen_add_i32(dest, t0, t1); tmp = load_cpu_field(CF); tcg_gen_add_i32(dest, dest, tmp); dead_tmp(tmp); } /* dest = T0 - T1 + CF - 1. */ static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1) { TCGv tmp; tcg_gen_sub_i32(dest, t0, t1); tmp = load_cpu_field(CF); tcg_gen_add_i32(dest, dest, tmp); tcg_gen_subi_i32(dest, dest, 1); dead_tmp(tmp); } static void shifter_out_im(TCGv var, int shift) { TCGv tmp = new_tmp(); if (shift == 0) { tcg_gen_andi_i32(tmp, var, 1); } else { tcg_gen_shri_i32(tmp, var, shift); if (shift != 31) { tcg_gen_andi_i32(tmp, tmp, 1); } } gen_set_CF(tmp); dead_tmp(tmp); } /* Shift by immediate. Includes special handling for shift == 0. */ static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift, int flags) { switch (shiftop) { case 0: /* LSL */ if (shift != 0) { if (flags) { shifter_out_im(var, 32 - shift); } tcg_gen_shli_i32(var, var, shift); } break; case 1: /* LSR */ if (shift == 0) { if (flags) { tcg_gen_shri_i32(var, var, 31); gen_set_CF(var); } tcg_gen_movi_i32(var, 0); } else { if (flags) { shifter_out_im(var, shift - 1); } tcg_gen_shri_i32(var, var, shift); } break; case 2: /* ASR */ if (shift == 0) { shift = 32; } if (flags) { shifter_out_im(var, shift - 1); } if (shift == 32) { shift = 31; } tcg_gen_sari_i32(var, var, shift); break; case 3: /* ROR/RRX */ if (shift != 0) { if (flags) { shifter_out_im(var, shift - 1); } tcg_gen_rotri_i32(var, var, shift); break; } else { TCGv tmp = load_cpu_field(CF); if (flags) { shifter_out_im(var, 0); } tcg_gen_shri_i32(var, var, 1); tcg_gen_shli_i32(tmp, tmp, 31); tcg_gen_or_i32(var, var, tmp); dead_tmp(tmp); } } }; static inline void gen_uc32_shift_reg(TCGv var, int shiftop, TCGv shift, int flags) { if (flags) { switch (shiftop) { case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break; case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break; case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break; case 3: 
gen_helper_ror_cc(var, cpu_env, var, shift); break; } } else { switch (shiftop) { case 0: gen_helper_shl(var, var, shift); break; case 1: gen_helper_shr(var, var, shift); break; case 2: gen_helper_sar(var, var, shift); break; case 3: tcg_gen_andi_i32(shift, shift, 0x1f); tcg_gen_rotr_i32(var, var, shift); break; } } dead_tmp(shift); } static void gen_test_cc(int cc, TCGLabel *label) { TCGv tmp; TCGv tmp2; TCGLabel *inv; switch (cc) { case 0: /* eq: Z */ tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); break; case 1: /* ne: !Z */ tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); break; case 2: /* cs: C */ tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); break; case 3: /* cc: !C */ tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); break; case 4: /* mi: N */ tmp = load_cpu_field(NF); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 5: /* pl: !N */ tmp = load_cpu_field(NF); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; case 6: /* vs: V */ tmp = load_cpu_field(VF); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 7: /* vc: !V */ tmp = load_cpu_field(VF); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; case 8: /* hi: C && !Z */ inv = gen_new_label(); tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); dead_tmp(tmp); tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); gen_set_label(inv); break; case 9: /* ls: !C || Z */ tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); dead_tmp(tmp); tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); break; case 10: /* ge: N == V -> N ^ V == 0 */ tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; case 11: /* lt: N != V -> N ^ V != 0 */ tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); 
tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 12: /* gt: !Z && N == V */ inv = gen_new_label(); tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); dead_tmp(tmp); tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); gen_set_label(inv); break; case 13: /* le: Z || N != V */ tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); dead_tmp(tmp); tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; default: fprintf(stderr, "Bad condition code 0x%x\n", cc); abort(); } dead_tmp(tmp); } static const uint8_t table_logic_cc[16] = { 1, /* and */ 1, /* xor */ 0, /* sub */ 0, /* rsb */ 0, /* add */ 0, /* adc */ 0, /* sbc */ 0, /* rsc */ 1, /* andl */ 1, /* xorl */ 0, /* cmp */ 0, /* cmn */ 1, /* orr */ 1, /* mov */ 1, /* bic */ 1, /* mvn */ }; /* Set PC state from an immediate address. */ static inline void gen_bx_im(DisasContext *s, uint32_t addr) { s->is_jmp = DISAS_UPDATE; tcg_gen_movi_i32(cpu_R[31], addr & ~3); } /* Set PC state from var. var is marked as dead. 
*/ static inline void gen_bx(DisasContext *s, TCGv var) { s->is_jmp = DISAS_UPDATE; tcg_gen_andi_i32(cpu_R[31], var, ~3); dead_tmp(var); } static inline void store_reg_bx(DisasContext *s, int reg, TCGv var) { store_reg(s, reg, var); } static inline TCGv gen_ld8s(TCGv addr, int index) { TCGv tmp = new_tmp(); tcg_gen_qemu_ld8s(tmp, addr, index); return tmp; } static inline TCGv gen_ld8u(TCGv addr, int index) { TCGv tmp = new_tmp(); tcg_gen_qemu_ld8u(tmp, addr, index); return tmp; } static inline TCGv gen_ld16s(TCGv addr, int index) { TCGv tmp = new_tmp(); tcg_gen_qemu_ld16s(tmp, addr, index); return tmp; } static inline TCGv gen_ld16u(TCGv addr, int index) { TCGv tmp = new_tmp(); tcg_gen_qemu_ld16u(tmp, addr, index); return tmp; } static inline TCGv gen_ld32(TCGv addr, int index) { TCGv tmp = new_tmp(); tcg_gen_qemu_ld32u(tmp, addr, index); return tmp; } static inline void gen_st8(TCGv val, TCGv addr, int index) { tcg_gen_qemu_st8(val, addr, index); dead_tmp(val); } static inline void gen_st16(TCGv val, TCGv addr, int index) { tcg_gen_qemu_st16(val, addr, index); dead_tmp(val); } static inline void gen_st32(TCGv val, TCGv addr, int index) { tcg_gen_qemu_st32(val, addr, index); dead_tmp(val); } static inline void gen_set_pc_im(uint32_t val) { tcg_gen_movi_i32(cpu_R[31], val); } /* Force a TB lookup after an instruction that changes the CPU state. 
*/ static inline void gen_lookup_tb(DisasContext *s) { tcg_gen_movi_i32(cpu_R[31], s->pc & ~1); s->is_jmp = DISAS_UPDATE; } static inline void gen_add_data_offset(DisasContext *s, unsigned int insn, TCGv var) { int val; TCGv offset; if (UCOP_SET(29)) { /* immediate */ val = UCOP_IMM14; if (!UCOP_SET_U) { val = -val; } if (val != 0) { tcg_gen_addi_i32(var, var, val); } } else { /* shift/register */ offset = load_reg(s, UCOP_REG_M); gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0); if (!UCOP_SET_U) { tcg_gen_sub_i32(var, var, offset); } else { tcg_gen_add_i32(var, var, offset); } dead_tmp(offset); } } static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn, TCGv var) { int val; TCGv offset; if (UCOP_SET(26)) { /* immediate */ val = (insn & 0x1f) | ((insn >> 4) & 0x3e0); if (!UCOP_SET_U) { val = -val; } if (val != 0) { tcg_gen_addi_i32(var, var, val); } } else { /* register */ offset = load_reg(s, UCOP_REG_M); if (!UCOP_SET_U) { tcg_gen_sub_i32(var, var, offset); } else { tcg_gen_add_i32(var, var, offset); } dead_tmp(offset); } } static inline long ucf64_reg_offset(int reg) { if (reg & 1) { return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1]) + offsetof(CPU_DoubleU, l.upper); } else { return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1]) + offsetof(CPU_DoubleU, l.lower); } } #define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg)) #define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg)) /* UniCore-F64 single load/store I_offset */ static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { int offset; TCGv tmp; TCGv addr; addr = load_reg(s, UCOP_REG_N); if (!UCOP_SET_P && !UCOP_SET_W) { ILLEGAL; } if (UCOP_SET_P) { offset = UCOP_IMM10 << 2; if (!UCOP_SET_U) { offset = -offset; } if (offset != 0) { tcg_gen_addi_i32(addr, addr, offset); } } if (UCOP_SET_L) { /* load */ tmp = gen_ld32(addr, IS_USER(s)); ucf64_gen_st32(tmp, UCOP_REG_D); } else { /* store */ tmp = 
ucf64_gen_ld32(UCOP_REG_D); gen_st32(tmp, addr, IS_USER(s)); } if (!UCOP_SET_P) { offset = UCOP_IMM10 << 2; if (!UCOP_SET_U) { offset = -offset; } if (offset != 0) { tcg_gen_addi_i32(addr, addr, offset); } } if (UCOP_SET_W) { store_reg(s, UCOP_REG_N, addr); } else { dead_tmp(addr); } } /* UniCore-F64 load/store multiple words */ static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { unsigned int i; int j, n, freg; TCGv tmp; TCGv addr; if (UCOP_REG_D != 0) { ILLEGAL; } if (UCOP_REG_N == 31) { ILLEGAL; } if ((insn << 24) == 0) { ILLEGAL; } addr = load_reg(s, UCOP_REG_N); n = 0; for (i = 0; i < 8; i++) { if (UCOP_SET(i)) { n++; } } if (UCOP_SET_U) { if (UCOP_SET_P) { /* pre increment */ tcg_gen_addi_i32(addr, addr, 4); } /* unnecessary to do anything when post increment */ } else { if (UCOP_SET_P) { /* pre decrement */ tcg_gen_addi_i32(addr, addr, -(n * 4)); } else { /* post decrement */ if (n != 1) { tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); } } } freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */ for (i = 0, j = 0; i < 8; i++, freg++) { if (!UCOP_SET(i)) { continue; } if (UCOP_SET_L) { /* load */ tmp = gen_ld32(addr, IS_USER(s)); ucf64_gen_st32(tmp, freg); } else { /* store */ tmp = ucf64_gen_ld32(freg); gen_st32(tmp, addr, IS_USER(s)); } j++; /* unnecessary to add after the last transfer */ if (j != n) { tcg_gen_addi_i32(addr, addr, 4); } } if (UCOP_SET_W) { /* write back */ if (UCOP_SET_U) { if (!UCOP_SET_P) { /* post increment */ tcg_gen_addi_i32(addr, addr, 4); } /* unnecessary to do anything when pre increment */ } else { if (UCOP_SET_P) { /* pre decrement */ if (n != 1) { tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); } } else { /* post decrement */ tcg_gen_addi_i32(addr, addr, -(n * 4)); } } store_reg(s, UCOP_REG_N, addr); } else { dead_tmp(addr); } } /* UniCore-F64 mrc/mcr */ static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { TCGv tmp; if ((insn & 0xfe0003ff) == 0xe2000000) { 
/* control register */ if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) { ILLEGAL; } if (UCOP_SET(24)) { /* CFF */ tmp = new_tmp(); gen_helper_ucf64_get_fpscr(tmp, cpu_env); store_reg(s, UCOP_REG_D, tmp); } else { /* CTF */ tmp = load_reg(s, UCOP_REG_D); gen_helper_ucf64_set_fpscr(cpu_env, tmp); dead_tmp(tmp); gen_lookup_tb(s); } return; } if ((insn & 0xfe0003ff) == 0xe0000000) { /* general register */ if (UCOP_REG_D == 31) { ILLEGAL; } if (UCOP_SET(24)) { /* MFF */ tmp = ucf64_gen_ld32(UCOP_REG_N); store_reg(s, UCOP_REG_D, tmp); } else { /* MTF */ tmp = load_reg(s, UCOP_REG_D); ucf64_gen_st32(tmp, UCOP_REG_N); } return; } if ((insn & 0xfb000000) == 0xe9000000) { /* MFFC */ if (UCOP_REG_D != 31) { ILLEGAL; } if (UCOP_UCF64_COND & 0x8) { ILLEGAL; } tmp = new_tmp(); tcg_gen_movi_i32(tmp, UCOP_UCF64_COND); if (UCOP_SET(26)) { tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N)); tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env); } else { tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N)); tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env); } dead_tmp(tmp); return; } ILLEGAL; } /* UniCore-F64 convert instructions */ static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { if (UCOP_UCF64_FMT == 3) { ILLEGAL; } if (UCOP_REG_N != 0) { ILLEGAL; } switch (UCOP_UCF64_FUNC) { case 0: /* cvt.s */ switch (UCOP_UCF64_FMT) { case 1 /* d */: tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env); tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); break; case 2 /* w */: tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env); tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); break; default /* s */: ILLEGAL; break; } break; case 1: /* cvt.d */ switch 
(UCOP_UCF64_FMT) { case 0 /* s */: tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env); tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D)); break; case 2 /* w */: tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env); tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D)); break; default /* d */: ILLEGAL; break; } break; case 4: /* cvt.w */ switch (UCOP_UCF64_FMT) { case 0 /* s */: tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env); tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); break; case 1 /* d */: tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env); tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); break; default /* w */: ILLEGAL; break; } break; default: ILLEGAL; } } /* UniCore-F64 compare instructions */ static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { if (UCOP_SET(25)) { ILLEGAL; } if (UCOP_REG_D != 0) { ILLEGAL; } ILLEGAL; /* TODO */ if (UCOP_SET(24)) { tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N)); tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */ } else { tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N)); tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */ } } #define gen_helper_ucf64_movs(x, y) do { } while (0) #define gen_helper_ucf64_movd(x, y) do { } while (0) #define UCF64_OP1(name) do { \ if (UCOP_REG_N != 0) { \ ILLEGAL; \ } \ switch (UCOP_UCF64_FMT) { \ case 0 /* s */: \ tcg_gen_ld_i32(cpu_F0s, cpu_env, \ ucf64_reg_offset(UCOP_REG_M)); \ gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \ tcg_gen_st_i32(cpu_F0s, cpu_env, \ ucf64_reg_offset(UCOP_REG_D)); \ 
break; \ case 1 /* d */: \ tcg_gen_ld_i64(cpu_F0d, cpu_env, \ ucf64_reg_offset(UCOP_REG_M)); \ gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \ tcg_gen_st_i64(cpu_F0d, cpu_env, \ ucf64_reg_offset(UCOP_REG_D)); \ break; \ case 2 /* w */: \ ILLEGAL; \ break; \ } \ } while (0) #define UCF64_OP2(name) do { \ switch (UCOP_UCF64_FMT) { \ case 0 /* s */: \ tcg_gen_ld_i32(cpu_F0s, cpu_env, \ ucf64_reg_offset(UCOP_REG_N)); \ tcg_gen_ld_i32(cpu_F1s, cpu_env, \ ucf64_reg_offset(UCOP_REG_M)); \ gen_helper_ucf64_##name##s(cpu_F0s, \ cpu_F0s, cpu_F1s, cpu_env); \ tcg_gen_st_i32(cpu_F0s, cpu_env, \ ucf64_reg_offset(UCOP_REG_D)); \ break; \ case 1 /* d */: \ tcg_gen_ld_i64(cpu_F0d, cpu_env, \ ucf64_reg_offset(UCOP_REG_N)); \ tcg_gen_ld_i64(cpu_F1d, cpu_env, \ ucf64_reg_offset(UCOP_REG_M)); \ gen_helper_ucf64_##name##d(cpu_F0d, \ cpu_F0d, cpu_F1d, cpu_env); \ tcg_gen_st_i64(cpu_F0d, cpu_env, \ ucf64_reg_offset(UCOP_REG_D)); \ break; \ case 2 /* w */: \ ILLEGAL; \ break; \ } \ } while (0) /* UniCore-F64 data processing */ static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { if (UCOP_UCF64_FMT == 3) { ILLEGAL; } switch (UCOP_UCF64_FUNC) { case 0: /* add */ UCF64_OP2(add); break; case 1: /* sub */ UCF64_OP2(sub); break; case 2: /* mul */ UCF64_OP2(mul); break; case 4: /* div */ UCF64_OP2(div); break; case 5: /* abs */ UCF64_OP1(abs); break; case 6: /* mov */ UCF64_OP1(mov); break; case 7: /* neg */ UCF64_OP1(neg); break; default: ILLEGAL; } } /* Disassemble an F64 instruction */ static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { if (!UCOP_SET(29)) { if (UCOP_SET(26)) { do_ucf64_ldst_m(env, s, insn); } else { do_ucf64_ldst_i(env, s, insn); } } else { if (UCOP_SET(5)) { switch ((insn >> 26) & 0x3) { case 0: do_ucf64_datap(env, s, insn); break; case 1: ILLEGAL; break; case 2: do_ucf64_fcvt(env, s, insn); break; case 3: do_ucf64_fcmp(env, s, insn); break; } } else { do_ucf64_trans(env, s, insn); } } } static inline bool 
use_goto_tb(DisasContext *s, uint32_t dest) { #ifndef CONFIG_USER_ONLY return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); #else return true; #endif } static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) { if (use_goto_tb(s, dest)) { tcg_gen_goto_tb(n); gen_set_pc_im(dest); tcg_gen_exit_tb(s->tb, n); } else { gen_set_pc_im(dest); tcg_gen_exit_tb(NULL, 0); } } static inline void gen_jmp(DisasContext *s, uint32_t dest) { if (unlikely(s->singlestep_enabled)) { /* An indirect jump so that we still trigger the debug exception. */ gen_bx_im(s, dest); } else { gen_goto_tb(s, 0, dest); s->is_jmp = DISAS_TB_JUMP; } } /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */ static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0) { TCGv tmp; if (bsr) { /* ??? This is also undefined in system mode. */ if (IS_USER(s)) { return 1; } tmp = load_cpu_field(bsr); tcg_gen_andi_i32(tmp, tmp, ~mask); tcg_gen_andi_i32(t0, t0, mask); tcg_gen_or_i32(tmp, tmp, t0); store_cpu_field(tmp, bsr); } else { gen_set_asr(t0, mask); } dead_tmp(t0); gen_lookup_tb(s); return 0; } /* Generate an old-style exception return. Marks pc as dead. */ static void gen_exception_return(DisasContext *s, TCGv pc) { TCGv tmp; store_reg(s, 31, pc); tmp = load_cpu_field(bsr); gen_set_asr(tmp, 0xffffffff); dead_tmp(tmp); s->is_jmp = DISAS_UPDATE; } static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { switch (UCOP_CPNUM) { #ifndef CONFIG_USER_ONLY case 0: disas_cp0_insn(env, s, insn); break; case 1: disas_ocd_insn(env, s, insn); break; #endif case 2: disas_ucf64_insn(env, s, insn); break; default: /* Unknown coprocessor. 
*/ cpu_abort(env_cpu(env), "Unknown coprocessor!"); } } /* data processing instructions */ static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { TCGv tmp; TCGv tmp2; int logic_cc; if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) { if (UCOP_SET(23)) { /* CMOV instructions */ if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) { ILLEGAL; } /* if not always execute, we generate a conditional jump to next instruction */ s->condlabel = gen_new_label(); gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel); s->condjmp = 1; } } logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24); if (UCOP_SET(29)) { unsigned int val; /* immediate operand */ val = UCOP_IMM_9; if (UCOP_SH_IM) { val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM)); } tmp2 = new_tmp(); tcg_gen_movi_i32(tmp2, val); if (logic_cc && UCOP_SH_IM) { gen_set_CF_bit31(tmp2); } } else { /* register */ tmp2 = load_reg(s, UCOP_REG_M); if (UCOP_SET(5)) { tmp = load_reg(s, UCOP_REG_S); gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc); } else { gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc); } } if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) { tmp = load_reg(s, UCOP_REG_N); } else { tmp = NULL; } switch (UCOP_OPCODES) { case 0x00: tcg_gen_and_i32(tmp, tmp, tmp2); if (logic_cc) { gen_logic_CC(tmp); } store_reg_bx(s, UCOP_REG_D, tmp); break; case 0x01: tcg_gen_xor_i32(tmp, tmp, tmp2); if (logic_cc) { gen_logic_CC(tmp); } store_reg_bx(s, UCOP_REG_D, tmp); break; case 0x02: if (UCOP_SET_S && UCOP_REG_D == 31) { /* SUBS r31, ... is used for exception return. 
*/ if (IS_USER(s)) { ILLEGAL; } gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); gen_exception_return(s, tmp); } else { if (UCOP_SET_S) { gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); } else { tcg_gen_sub_i32(tmp, tmp, tmp2); } store_reg_bx(s, UCOP_REG_D, tmp); } break; case 0x03: if (UCOP_SET_S) { gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp); } else { tcg_gen_sub_i32(tmp, tmp2, tmp); } store_reg_bx(s, UCOP_REG_D, tmp); break; case 0x04: if (UCOP_SET_S) { gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); } else { tcg_gen_add_i32(tmp, tmp, tmp2); } store_reg_bx(s, UCOP_REG_D, tmp); break; case 0x05: if (UCOP_SET_S) { gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2); } else { gen_add_carry(tmp, tmp, tmp2); } store_reg_bx(s, UCOP_REG_D, tmp); break; case 0x06: if (UCOP_SET_S) { gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2); } else { gen_sub_carry(tmp, tmp, tmp2); } store_reg_bx(s, UCOP_REG_D, tmp); break; case 0x07: if (UCOP_SET_S) { gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp); } else { gen_sub_carry(tmp, tmp2, tmp); } store_reg_bx(s, UCOP_REG_D, tmp); break; case 0x08: if (UCOP_SET_S) { tcg_gen_and_i32(tmp, tmp, tmp2); gen_logic_CC(tmp); } dead_tmp(tmp); break; case 0x09: if (UCOP_SET_S) { tcg_gen_xor_i32(tmp, tmp, tmp2); gen_logic_CC(tmp); } dead_tmp(tmp); break; case 0x0a: if (UCOP_SET_S) { gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); } dead_tmp(tmp); break; case 0x0b: if (UCOP_SET_S) { gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); } dead_tmp(tmp); break; case 0x0c: tcg_gen_or_i32(tmp, tmp, tmp2); if (logic_cc) { gen_logic_CC(tmp); } store_reg_bx(s, UCOP_REG_D, tmp); break; case 0x0d: if (logic_cc && UCOP_REG_D == 31) { /* MOVS r31, ... is used for exception return. 
*/ if (IS_USER(s)) { ILLEGAL; } gen_exception_return(s, tmp2); } else { if (logic_cc) { gen_logic_CC(tmp2); } store_reg_bx(s, UCOP_REG_D, tmp2); } break; case 0x0e: tcg_gen_andc_i32(tmp, tmp, tmp2); if (logic_cc) { gen_logic_CC(tmp); } store_reg_bx(s, UCOP_REG_D, tmp); break; default: case 0x0f: tcg_gen_not_i32(tmp2, tmp2); if (logic_cc) { gen_logic_CC(tmp2); } store_reg_bx(s, UCOP_REG_D, tmp2); break; } if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) { dead_tmp(tmp2); } } /* multiply */ static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { TCGv tmp, tmp2, tmp3, tmp4; if (UCOP_SET(27)) { /* 64 bit mul */ tmp = load_reg(s, UCOP_REG_M); tmp2 = load_reg(s, UCOP_REG_N); if (UCOP_SET(26)) { tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2); } else { tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2); } if (UCOP_SET(25)) { /* mult accumulate */ tmp3 = load_reg(s, UCOP_REG_LO); tmp4 = load_reg(s, UCOP_REG_HI); tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4); dead_tmp(tmp3); dead_tmp(tmp4); } store_reg(s, UCOP_REG_LO, tmp); store_reg(s, UCOP_REG_HI, tmp2); } else { /* 32 bit mul */ tmp = load_reg(s, UCOP_REG_M); tmp2 = load_reg(s, UCOP_REG_N); tcg_gen_mul_i32(tmp, tmp, tmp2); dead_tmp(tmp2); if (UCOP_SET(25)) { /* Add */ tmp2 = load_reg(s, UCOP_REG_S); tcg_gen_add_i32(tmp, tmp, tmp2); dead_tmp(tmp2); } if (UCOP_SET_S) { gen_logic_CC(tmp); } store_reg(s, UCOP_REG_D, tmp); } } /* miscellaneous instructions */ static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { unsigned int val; TCGv tmp; if ((insn & 0xffffffe0) == 0x10ffc120) { /* Trivial implementation equivalent to bx. 
*/ tmp = load_reg(s, UCOP_REG_M); gen_bx(s, tmp); return; } if ((insn & 0xfbffc000) == 0x30ffc000) { /* PSR = immediate */ val = UCOP_IMM_9; if (UCOP_SH_IM) { val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM)); } tmp = new_tmp(); tcg_gen_movi_i32(tmp, val); if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) { ILLEGAL; } return; } if ((insn & 0xfbffffe0) == 0x12ffc020) { /* PSR.flag = reg */ tmp = load_reg(s, UCOP_REG_M); if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) { ILLEGAL; } return; } if ((insn & 0xfbffffe0) == 0x10ffc020) { /* PSR = reg */ tmp = load_reg(s, UCOP_REG_M); if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) { ILLEGAL; } return; } if ((insn & 0xfbf83fff) == 0x10f80000) { /* reg = PSR */ if (UCOP_SET_B) { if (IS_USER(s)) { ILLEGAL; } tmp = load_cpu_field(bsr); } else { tmp = new_tmp(); gen_helper_asr_read(tmp, cpu_env); } store_reg(s, UCOP_REG_D, tmp); return; } if ((insn & 0xfbf83fe0) == 0x12f80120) { /* clz */ tmp = load_reg(s, UCOP_REG_M); if (UCOP_SET(26)) { /* clo */ tcg_gen_not_i32(tmp, tmp); } tcg_gen_clzi_i32(tmp, tmp, 32); store_reg(s, UCOP_REG_D, tmp); return; } /* otherwise */ ILLEGAL; } /* load/store I_offset and R_offset */ static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { unsigned int mmu_idx; TCGv tmp; TCGv tmp2; tmp2 = load_reg(s, UCOP_REG_N); mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W)); /* immediate */ if (UCOP_SET_P) { gen_add_data_offset(s, insn, tmp2); } if (UCOP_SET_L) { /* load */ if (UCOP_SET_B) { tmp = gen_ld8u(tmp2, mmu_idx); } else { tmp = gen_ld32(tmp2, mmu_idx); } } else { /* store */ tmp = load_reg(s, UCOP_REG_D); if (UCOP_SET_B) { gen_st8(tmp, tmp2, mmu_idx); } else { gen_st32(tmp, tmp2, mmu_idx); } } if (!UCOP_SET_P) { gen_add_data_offset(s, insn, tmp2); store_reg(s, UCOP_REG_N, tmp2); } else if (UCOP_SET_W) { store_reg(s, UCOP_REG_N, tmp2); } else { dead_tmp(tmp2); } if (UCOP_SET_L) { /* Complete the load. 
*/ if (UCOP_REG_D == 31) { gen_bx(s, tmp); } else { store_reg(s, UCOP_REG_D, tmp); } } } /* SWP instruction */ static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { TCGv addr; TCGv tmp; TCGv tmp2; if ((insn & 0xff003fe0) != 0x40000120) { ILLEGAL; } /* ??? This is not really atomic. However we know we never have multiple CPUs running in parallel, so it is good enough. */ addr = load_reg(s, UCOP_REG_N); tmp = load_reg(s, UCOP_REG_M); if (UCOP_SET_B) { tmp2 = gen_ld8u(addr, IS_USER(s)); gen_st8(tmp, addr, IS_USER(s)); } else { tmp2 = gen_ld32(addr, IS_USER(s)); gen_st32(tmp, addr, IS_USER(s)); } dead_tmp(addr); store_reg(s, UCOP_REG_D, tmp2); } /* load/store hw/sb */ static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn) { TCGv addr; TCGv tmp; if (UCOP_SH_OP == 0) { do_swap(env, s, insn); return; } addr = load_reg(s, UCOP_REG_N); if (UCOP_SET_P) { gen_add_datah_offset(s, insn, addr); } if (UCOP_SET_L) { /* load */ switch (UCOP_SH_OP) { case 1: tmp = gen_ld16u(addr, IS_USER(s)); break; case 2: tmp = gen_ld8s(addr, IS_USER(s)); break; default: /* see do_swap */ case 3: tmp = gen_ld16s(addr, IS_USER(s)); break; } } else { /* store */ if (UCOP_SH_OP != 1) { ILLEGAL; } tmp = load_reg(s, UCOP_REG_D); gen_st16(tmp, addr, IS_USER(s)); } /* Perform base writeback before the loaded value to ensure correct behavior with overlapping index registers. */ if (!UCOP_SET_P) { gen_add_datah_offset(s, insn, addr); store_reg(s, UCOP_REG_N, addr); } else if (UCOP_SET_W) { store_reg(s, UCOP_REG_N, addr); } else { dead_tmp(addr); } if (UCOP_SET_L) { /* Complete the load. 
*/ store_reg(s, UCOP_REG_D, tmp); } }

/* load/store multiple words
 *
 * Decodes a UniCore32 LDM/STM-style instruction and emits TCG ops that
 * transfer each register named in the instruction's register-list bits to
 * or from memory, with pre/post increment/decrement addressing and
 * optional base-register write-back.
 */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val, i, mmu_idx;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    /* bit 7 must be clear for a valid encoding */
    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        /* if PC (r31, bit 18) is not in the list, access the USER-mode
         * register bank instead of the current one */
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }
    /* user-mode MMU index when in user mode, or when post-indexed with
     * write-back (the "T" unprivileged-access form) */
    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
    addr = load_reg(s, UCOP_REG_N);

    /* compute total size: n = number of registers selected.  The register
     * list is split across bits [5:0] and [18:9] (bits 8..6 are opcode
     * bits, skipped below as well). */
    loaded_base = 0;
    loaded_var = NULL;
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    /* Pre-adjust addr so the transfer loop can always step upward by 4. */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else { /* post increment */
        }
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    /* bit 6 selects the upper half of the register file (r16..) */
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (i == 6) {
            /* skip opcode bits 6..8 of the encoding */
            i = i + 3;
        }
        if (UCOP_SET(i)) {
            if (UCOP_SET_L) { /* load */
                tmp = gen_ld32(addr, mmu_idx);
                if (reg == 31) {
                    /* loading the PC is a branch */
                    gen_bx(s, tmp);
                } else if (user) {
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    dead_tmp(tmp);
                } else if (reg == UCOP_REG_N) {
                    /* defer writing the base until after write-back */
                    loaded_var = tmp;
                    loaded_base = 1;
                } else {
                    store_reg(s, reg, tmp);
                }
            } else { /* store */
                if (reg == 31) {
                    /* special case: r31 = PC + 4 */
                    val = (long)s->pc;
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                } else if (user) {
                    tmp = new_tmp();
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, reg);
                }
                gen_st32(tmp, addr, mmu_idx);
            }
            j++;
            /* no need to add after the last transfer */
            if (j != n) {
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
    }
    if (UCOP_SET_W) { /* write back */
        /* undo/complete the pre-adjustment so addr ends up at the
         * architecturally correct written-back base value */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) { /* pre increment */
            } else { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) { /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else { /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (loaded_base) {
        /* a loaded base overrides the write-back value */
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR. */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        /* ASR may have changed mode; force the main loop to resync */
        s->is_jmp = DISAS_UPDATE;
    }
}

/* branch (and link)
 *
 * B/BL with a 24-bit signed word offset relative to PC+4; BL stores the
 * return address in r30.  Conditional forms emit a skip-label jump.
 */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;

    if (UCOP_SET_L) {
        /* branch-and-link: save return address in r30 */
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }

    /* sign-extend the low 24 bits, scale to words */
    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}

/* Decode and translate one UniCore32 instruction at s->pc, dispatching on
 * the top three opcode bits to the per-class helpers above.
 */
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    unsigned int insn;

    insn = cpu_ldl_code(env, s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }
        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
        /* fallthrough: remaining 0x0 encodings are data-processing */
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;
    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
        /* fallthrough: plain word/byte load/store */
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;
    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor. */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }
}

/* generate intermediate code for basic block 'tb'.
 */
/* Translate one basic block: disassemble guest instructions starting at
 * tb->pc into TCG ops until a branch, a page boundary, the op buffer
 * filling up, or max_insns is reached.  On return, tb->size and
 * tb->icount describe what was translated.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUUniCore32State *env = cs->env_ptr;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    uint32_t page_start;
    int num_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

#ifndef CONFIG_USER_ONLY
    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
        dc->user = 1;
    } else {
        dc->user = 0;
    }
#endif

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_set_pc_im(dc->pc);
            gen_exception(EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing. */
            dc->pc += 4;
            goto done_generating;
        }

        /* the last instruction of an icount-limited TB may do I/O */
        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            /* a translator helper leaked a TCG temp; report and recover */
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place. */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc - page_start < TARGET_PAGE_SIZE &&
             num_insns < max_insns);

    if (tb_cflags(tb) & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this
           code. */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the
               next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            /* fall-through path of a conditional instruction */
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log("\n");
        qemu_log_unlock(logfile);
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}

/* Printable names for the 16 processor modes (indexed by ASR[3:0]). */
static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};

/* NOTE(review): UCF64_DUMP_STATE is unconditionally #undef'd, so the
 * function below is dead code kept for debugging; see the notes at its
 * body about bitrot before re-enabling it. */
#undef UCF64_DUMP_STATE
#ifdef UCF64_DUMP_STATE
/* Dump the UCF64 (FPU) register file for debugging. */
static void cpu_dump_state_ucf64(CPUUniCore32State *env, int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.
     */
    union {
        float64 f64;
        double d;
    } d0;
    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        /* NOTE(review): 'f' is not a parameter of this function, and the
         * only call site passes four arguments to the two-parameter
         * prototype above — this #ifdef'd-out block has bitrotted and
         * would not compile if UCF64_DUMP_STATE were defined; its
         * signature needs to be reconciled with the call site before
         * re-enabling. */
        qemu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
                     i * 2, (int)s0.i, s0.s,
                     i * 2 + 1, (int)s1.i, s1.s);
        qemu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
                     i, (uint64_t)d0.f64, d0.d);
    }
    qemu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
}
#else
/* UCF64 dump compiled out: expand to nothing (see #undef above). */
#define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
#endif

/* Print the general-purpose registers, the PSR (with NZCV flags and the
 * current mode name), and optionally the FPU state, to stream f.
 */
void uc32_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    UniCore32CPU *cpu = UNICORE32_CPU(cs);
    CPUUniCore32State *env = &cpu->env;
    int i;
    uint32_t psr;

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        /* four registers per output line */
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    qemu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                 psr,
                 psr & (1 << 31) ? 'N' : '-',
                 psr & (1 << 30) ? 'Z' : '-',
                 psr & (1 << 29) ? 'C' : '-',
                 psr & (1 << 28) ? 'V' : '-',
                 cpu_mode_names[psr & 0xf]);

    if (flags & CPU_DUMP_FPU) {
        /* expands to a no-op while UCF64_DUMP_STATE is #undef'd;
         * NOTE(review): 'cpu_fprintf' here is not defined anywhere in
         * this chunk — presumably leftover from an older dump API;
         * harmless only because the macro discards its arguments. */
        cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
    }
}

/* Restore CPU state from the insn_start data recorded at translation
 * time: data[0] is the guest PC (r31 doubles as the PC on UniCore32).
 */
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, target_ulong *data)
{
    env->regs[31] = data[0];
}