target-arm queue:
* Implement FEAT_GCS
* Implement FEAT_MEC

Merge tag 'pull-target-arm-20251010' of https://gitlab.com/pm215/qemu into staging

# gpg: Signature made Fri 10 Oct 2025 06:04:32 AM PDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [unknown]
# gpg: WARNING: The key's User ID is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20251010' of https://gitlab.com/pm215/qemu: (76 commits)
  target/arm: Enable FEAT_MEC in -cpu max
  target/arm: Implement FEAT_MEC registers
  target/arm: Add a cpreg flag to indicate no trap in NV
  tests/tcg/aarch64: Add gcsss
  tests/tcg/aarch64: Add gcspushm
  tests/tcg/aarch64: Add gcsstr
  linux-user/aarch64: Enable GCS in HWCAP
  linux-user/aarch64: Generate GCS signal records
  linux-user/aarch64: Inject SIGSEGV for GCS faults
  target/arm: Enable GCSPR_EL0 for read in user-mode
  linux-user/aarch64: Implement map_shadow_stack syscall
  linux-user/aarch64: Release gcs stack on thread exit
  linux-user/aarch64: Allocate new gcs stack on clone
  linux-user/aarch64: Implement prctls for GCS
  target/arm: Enable FEAT_GCS with -cpu max
  target/arm: Implement EXLOCK check during exception return
  target/arm: Copy EXLOCKEn to EXLOCK on exception to the same EL
  target/arm: Load gcs record for RET with PAuth
  target/arm: Load gcs record for RET
  target/arm: Add gcs record for BLR with PAuth
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Commit: f3f2ad1193
48 changed files with 2810 additions and 610 deletions
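For context, the linux-user side of this queue exposes GCS to guest binaries through three new prctl options plus the map_shadow_stack syscall. Below is a minimal sketch of how an AArch64 guest program could switch its shadow stack on; the PR_* values are the ones defined further down in this diff, while the always-inlined raw-syscall helper and the final _exit() are illustrative assumptions: once GCS is enabled, returning from any function that was entered before the enable would pop an empty shadow stack and fault.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Constant values as added to linux-user/syscall.c and gcs-internal.h below. */
#define PR_GET_SHADOW_STACK_STATUS   74
#define PR_SET_SHADOW_STACK_STATUS   75
#define PR_LOCK_SHADOW_STACK_STATUS  76
#define PR_SHADOW_STACK_ENABLE       (1UL << 0)

/* prctl() issued as an always-inlined raw syscall, so that no libc wrapper
 * has to execute a RET in the window right after GCS becomes active. */
static inline __attribute__((always_inline))
long raw_prctl(long op, unsigned long arg2)
{
    register long x8 __asm__("x8") = __NR_prctl;
    register long x0 __asm__("x0") = op;
    register long x1 __asm__("x1") = arg2;
    register long x2 __asm__("x2") = 0;
    register long x3 __asm__("x3") = 0;
    register long x4 __asm__("x4") = 0;

    __asm__ __volatile__("svc #0"
                         : "+r"(x0)
                         : "r"(x8), "r"(x1), "r"(x2), "r"(x3), "r"(x4)
                         : "memory", "cc");
    return x0;
}

int main(void)
{
    unsigned long mode = 0;
    long ret = raw_prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE);

    if (ret) {
        /* GCS never came on, so returning normally is still safe. */
        fprintf(stderr, "PR_SET_SHADOW_STACK_STATUS: %ld\n", ret);
        return 1;
    }

    raw_prctl(PR_LOCK_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE);
    raw_prctl(PR_GET_SHADOW_STACK_STATUS, (unsigned long)&mode);
    printf("shadow stack mode: %#lx\n", mode);

    /* main() was entered before GCS was enabled, so do not return through it. */
    _exit(0);
}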
@@ -90,9 +90,6 @@
 QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
 
-/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
- */
-QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
 
 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
@@ -28,6 +28,7 @@ the following architecture extensions:
- FEAT_BF16 (AArch64 BFloat16 instructions)
- FEAT_BTI (Branch Target Identification)
- FEAT_CCIDX (Extended cache index)
- FEAT_CHK (Check Feature Status)
- FEAT_CMOW (Control for cache maintenance permission)
- FEAT_CRC32 (CRC32 instructions)
- FEAT_Crypto (Cryptographic Extension)

@@ -72,6 +73,7 @@ the following architecture extensions:
- FEAT_FRINTTS (Floating-point to integer instructions)
- FEAT_FlagM (Flag manipulation instructions v2)
- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
- FEAT_GCS (Guarded Control Stack Extension)
- FEAT_GTG (Guest translation granule size)
- FEAT_HAFDBS (Hardware management of the access flag and dirty bit state)
- FEAT_HBC (Hinted conditional branches)

@@ -92,6 +94,9 @@ the following architecture extensions:
- FEAT_LSE2 (Large System Extensions v2)
- FEAT_LSE128 (128-bit Atomics)
- FEAT_LVA (Large Virtual Address space)
- FEAT_MEC (Memory Encryption Contexts)

  * This is a register-only implementation without encryption.
- FEAT_MixedEnd (Mixed-endian support)
- FEAT_MixedEndEL0 (Mixed-endian support at EL0)
- FEAT_MOPS (Standardization of memory operations)

@@ -123,6 +128,8 @@ the following architecture extensions:
- FEAT_RME_GPC2 (RME Granule Protection Check 2 Extension)
- FEAT_RNG (Random number generator)
- FEAT_RPRES (Increased precision of FRECPE and FRSQRTE)
- FEAT_S1PIE (Stage 1 permission indirections)
- FEAT_S2PIE (Stage 2 permission indirections)
- FEAT_S2FWB (Stage 2 forced Write-Back)
- FEAT_SB (Speculation Barrier)
- FEAT_SCTLR2 (Extension to SCTLR_ELx)
@@ -25,9 +25,10 @@ typedef uint32_t MemOpIdx;
 static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
 {
 #ifdef CONFIG_DEBUG_TCG
-    assert(idx <= 15);
+    assert(idx <= 31);
+    assert(clz32(op) >= 5);
 #endif
-    return (op << 4) | idx;
+    return (op << 5) | idx;
 }
 
 /**
@@ -38,7 +39,7 @@ static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
  */
 static inline MemOp get_memop(MemOpIdx oi)
 {
-    return oi >> 4;
+    return oi >> 5;
 }
 
 /**
@@ -49,7 +50,7 @@ static inline MemOp get_memop(MemOpIdx oi)
  */
 static inline unsigned get_mmuidx(MemOpIdx oi)
 {
-    return oi & 15;
+    return oi & 31;
 }
 
 #endif
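The widened layout above keeps the MemOp in the upper bits and grows the mmu-index field from four to five bits. A standalone round-trip check of the three accessors, using a stand-in numeric MemOp value rather than QEMU's real enum, just to illustrate the packing:

#include <assert.h>
#include <stdint.h>

typedef uint32_t MemOpIdx;
typedef unsigned MemOp;   /* stand-in: only the packing arithmetic matters */

static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
{
    return (op << 5) | idx;              /* mmu index now lives in bits [4:0] */
}

static inline MemOp get_memop(MemOpIdx oi) { return oi >> 5; }
static inline unsigned get_mmuidx(MemOpIdx oi) { return oi & 31; }

int main(void)
{
    MemOpIdx oi = make_memop_idx(0x13, 21);   /* mmu index 21 needs 5 bits */

    assert(get_memop(oi) == 0x13);
    assert(get_mmuidx(oi) == 21);
    return 0;
}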
@@ -198,10 +198,11 @@ struct CPUClass {
 };
 
 /*
- * Fix the number of mmu modes to 16.
+ * Fix the number of mmu modes across all targets.
+ * Current maximum is target/arm/.
  */
-#define NB_MMU_MODES 16
-typedef uint16_t MMUIdxMap;
+#define NB_MMU_MODES 22
+typedef uint32_t MMUIdxMap;
 
 /* Use a fully associative victim tlb of 8 entries. */
 #define CPU_VTLB_SIZE 8
@@ -89,6 +89,11 @@ static void signal_for_exception(CPUARMState *env, vaddr addr)
         si_code = TARGET_ILL_ILLOPN;
         break;
 
+    case EC_GCS:
+        si_signo = TARGET_SIGSEGV;
+        si_code = TARGET_SEGV_CPERR;
+        break;
+
     case EC_MOP:
         /*
          * FIXME: The kernel fixes up wrong-option exceptions.
@@ -169,6 +169,7 @@ abi_ulong get_elf_hwcap(CPUState *cs)
     GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
     GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
     GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);
+    GET_FEATURE_ID(aa64_gcs, ARM_HWCAP_A64_GCS);
 
     return hwcaps;
 }
linux-user/aarch64/gcs-internal.h (new file, 38 lines)
@@ -0,0 +1,38 @@
/*
 * AArch64 gcs functions for linux-user
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef AARCH64_GCS_INTERNAL_H
#define AARCH64_GCS_INTERNAL_H

#ifndef PR_SHADOW_STACK_ENABLE
# define PR_SHADOW_STACK_ENABLE  (1U << 0)
# define PR_SHADOW_STACK_WRITE   (1U << 1)
# define PR_SHADOW_STACK_PUSH    (1U << 2)
#endif

static inline uint64_t gcs_get_el0_mode(CPUArchState *env)
{
    uint64_t cr = env->cp15.gcscr_el[0];
    abi_ulong flags = 0;

    flags |= cr & GCSCR_PCRSEL ? PR_SHADOW_STACK_ENABLE : 0;
    flags |= cr & GCSCR_STREN ? PR_SHADOW_STACK_WRITE : 0;
    flags |= cr & GCSCR_PUSHMEN ? PR_SHADOW_STACK_PUSH : 0;

    return flags;
}

static inline void gcs_set_el0_mode(CPUArchState *env, uint64_t flags)
{
    uint64_t cr = GCSCRE0_NTR;

    cr |= flags & PR_SHADOW_STACK_ENABLE ? GCSCR_RVCHKEN | GCSCR_PCRSEL : 0;
    cr |= flags & PR_SHADOW_STACK_WRITE ? GCSCR_STREN : 0;
    cr |= flags & PR_SHADOW_STACK_PUSH ? GCSCR_PUSHMEN : 0;

    env->cp15.gcscr_el[0] = cr;
}

#endif
@ -22,6 +22,7 @@
|
|||
#include "signal-common.h"
|
||||
#include "linux-user/trace.h"
|
||||
#include "target/arm/cpu-features.h"
|
||||
#include "gcs-internal.h"
|
||||
|
||||
struct target_sigcontext {
|
||||
uint64_t fault_address;
|
||||
|
|
@ -152,6 +153,16 @@ struct target_zt_context {
|
|||
QEMU_BUILD_BUG_ON(TARGET_ZT_SIG_REG_BYTES != \
|
||||
sizeof_field(CPUARMState, za_state.zt0));
|
||||
|
||||
#define TARGET_GCS_MAGIC 0x47435300
|
||||
#define GCS_SIGNAL_CAP(X) ((X) & TARGET_PAGE_MASK)
|
||||
|
||||
struct target_gcs_context {
|
||||
struct target_aarch64_ctx head;
|
||||
uint64_t gcspr;
|
||||
uint64_t features_enabled;
|
||||
uint64_t reserved;
|
||||
};
|
||||
|
||||
struct target_rt_sigframe {
|
||||
struct target_siginfo info;
|
||||
struct target_ucontext uc;
|
||||
|
|
@ -322,6 +333,35 @@ static void target_setup_zt_record(struct target_zt_context *zt,
|
|||
}
|
||||
}
|
||||
|
||||
static bool target_setup_gcs_record(struct target_gcs_context *ctx,
|
||||
CPUARMState *env, uint64_t return_addr)
|
||||
{
|
||||
uint64_t mode = gcs_get_el0_mode(env);
|
||||
uint64_t gcspr = env->cp15.gcspr_el[0];
|
||||
|
||||
if (mode & PR_SHADOW_STACK_ENABLE) {
|
||||
/* Push a cap for the signal frame. */
|
||||
gcspr -= 8;
|
||||
if (put_user_u64(GCS_SIGNAL_CAP(gcspr), gcspr)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Push a gcs entry for the trampoline. */
|
||||
if (put_user_u64(return_addr, gcspr - 8)) {
|
||||
return false;
|
||||
}
|
||||
env->cp15.gcspr_el[0] = gcspr - 8;
|
||||
}
|
||||
|
||||
__put_user(TARGET_GCS_MAGIC, &ctx->head.magic);
|
||||
__put_user(sizeof(*ctx), &ctx->head.size);
|
||||
__put_user(gcspr, &ctx->gcspr);
|
||||
__put_user(mode, &ctx->features_enabled);
|
||||
__put_user(0, &ctx->reserved);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void target_restore_general_frame(CPUARMState *env,
|
||||
struct target_rt_sigframe *sf)
|
||||
{
|
||||
|
|
@ -502,6 +542,64 @@ static bool target_restore_zt_record(CPUARMState *env,
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool target_restore_gcs_record(CPUARMState *env,
|
||||
struct target_gcs_context *ctx,
|
||||
bool *rebuild_hflags)
|
||||
{
|
||||
TaskState *ts = get_task_state(env_cpu(env));
|
||||
uint64_t cur_mode = gcs_get_el0_mode(env);
|
||||
uint64_t new_mode, gcspr;
|
||||
|
||||
__get_user(new_mode, &ctx->features_enabled);
|
||||
__get_user(gcspr, &ctx->gcspr);
|
||||
|
||||
/*
|
||||
* The kernel pushes the value through the hw register:
|
||||
* write_sysreg_s(gcspr, SYS_GCSPR_EL0) in restore_gcs_context,
|
||||
* then read_sysreg_s(SYS_GCSPR_EL0) in gcs_restore_signal.
|
||||
* Since the bottom 3 bits are RES0, this can (CONSTRAINED UNPREDICTABLE)
|
||||
* force align the value. Mirror the choice from gcspr_write().
|
||||
*/
|
||||
gcspr &= ~7;
|
||||
|
||||
if (new_mode & ~(PR_SHADOW_STACK_ENABLE |
|
||||
PR_SHADOW_STACK_WRITE |
|
||||
PR_SHADOW_STACK_PUSH)) {
|
||||
return false;
|
||||
}
|
||||
if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) {
|
||||
return false;
|
||||
}
|
||||
if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (new_mode & PR_SHADOW_STACK_ENABLE) {
|
||||
uint64_t cap;
|
||||
|
||||
/* Pop and clear the signal cap. */
|
||||
if (get_user_u64(cap, gcspr)) {
|
||||
return false;
|
||||
}
|
||||
if (cap != GCS_SIGNAL_CAP(gcspr)) {
|
||||
return false;
|
||||
}
|
||||
if (put_user_u64(0, gcspr)) {
|
||||
return false;
|
||||
}
|
||||
gcspr += 8;
|
||||
} else {
|
||||
new_mode = 0;
|
||||
}
|
||||
|
||||
env->cp15.gcspr_el[0] = gcspr;
|
||||
if (new_mode != cur_mode) {
|
||||
*rebuild_hflags = true;
|
||||
gcs_set_el0_mode(env, new_mode);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static int target_restore_sigframe(CPUARMState *env,
|
||||
struct target_rt_sigframe *sf)
|
||||
{
|
||||
|
|
@ -511,8 +609,10 @@ static int target_restore_sigframe(CPUARMState *env,
|
|||
struct target_za_context *za = NULL;
|
||||
struct target_tpidr2_context *tpidr2 = NULL;
|
||||
struct target_zt_context *zt = NULL;
|
||||
struct target_gcs_context *gcs = NULL;
|
||||
uint64_t extra_datap = 0;
|
||||
bool used_extra = false;
|
||||
bool rebuild_hflags = false;
|
||||
int sve_size = 0;
|
||||
int za_size = 0;
|
||||
int zt_size = 0;
|
||||
|
|
@ -582,6 +682,15 @@ static int target_restore_sigframe(CPUARMState *env,
|
|||
zt_size = size;
|
||||
break;
|
||||
|
||||
case TARGET_GCS_MAGIC:
|
||||
if (gcs
|
||||
|| size != sizeof(struct target_gcs_context)
|
||||
|| !cpu_isar_feature(aa64_gcs, env_archcpu(env))) {
|
||||
goto err;
|
||||
}
|
||||
gcs = (struct target_gcs_context *)ctx;
|
||||
break;
|
||||
|
||||
case TARGET_EXTRA_MAGIC:
|
||||
if (extra || size != sizeof(struct target_extra_context)) {
|
||||
goto err;
|
||||
|
|
@ -612,6 +721,10 @@ static int target_restore_sigframe(CPUARMState *env,
|
|||
goto err;
|
||||
}
|
||||
|
||||
if (gcs && !target_restore_gcs_record(env, gcs, &rebuild_hflags)) {
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* SVE data, if present, overwrites FPSIMD data. */
|
||||
if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
|
||||
goto err;
|
||||
|
|
@ -631,6 +744,9 @@ static int target_restore_sigframe(CPUARMState *env,
|
|||
}
|
||||
if (env->svcr != svcr) {
|
||||
env->svcr = svcr;
|
||||
rebuild_hflags = true;
|
||||
}
|
||||
if (rebuild_hflags) {
|
||||
arm_rebuild_hflags(env);
|
||||
}
|
||||
unlock_user(extra, extra_datap, 0);
|
||||
|
|
@ -701,7 +817,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
|
|||
uc.tuc_mcontext.__reserved),
|
||||
};
|
||||
int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0, tpidr2_ofs = 0;
|
||||
int zt_ofs = 0, esr_ofs = 0;
|
||||
int zt_ofs = 0, esr_ofs = 0, gcs_ofs = 0;
|
||||
int sve_size = 0, za_size = 0, tpidr2_size = 0, zt_size = 0;
|
||||
struct target_rt_sigframe *frame;
|
||||
struct target_rt_frame_record *fr;
|
||||
|
|
@ -720,6 +836,11 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
|
|||
&layout);
|
||||
}
|
||||
|
||||
if (env->cp15.gcspr_el[0]) {
|
||||
gcs_ofs = alloc_sigframe_space(sizeof(struct target_gcs_context),
|
||||
&layout);
|
||||
}
|
||||
|
||||
/* SVE state needs saving only if it exists. */
|
||||
if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
|
||||
cpu_isar_feature(aa64_sme, env_archcpu(env))) {
|
||||
|
|
@ -779,6 +900,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
|
|||
goto give_sigsegv;
|
||||
}
|
||||
|
||||
if (ka->sa_flags & TARGET_SA_RESTORER) {
|
||||
return_addr = ka->sa_restorer;
|
||||
} else {
|
||||
return_addr = default_rt_sigreturn;
|
||||
}
|
||||
|
||||
target_setup_general_frame(frame, env, set);
|
||||
target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
|
||||
if (esr_ofs) {
|
||||
|
|
@ -786,6 +913,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
|
|||
/* Leave ESR_EL1 clear while it's not relevant. */
|
||||
env->cp15.esr_el[1] = 0;
|
||||
}
|
||||
if (gcs_ofs &&
|
||||
!target_setup_gcs_record((void *)frame + gcs_ofs, env, return_addr)) {
|
||||
goto give_sigsegv;
|
||||
}
|
||||
target_setup_end_record((void *)frame + layout.std_end_ofs);
|
||||
if (layout.extra_ofs) {
|
||||
target_setup_extra_record((void *)frame + layout.extra_ofs,
|
||||
|
|
@ -811,11 +942,6 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
|
|||
__put_user(env->xregs[29], &fr->fp);
|
||||
__put_user(env->xregs[30], &fr->lr);
|
||||
|
||||
if (ka->sa_flags & TARGET_SA_RESTORER) {
|
||||
return_addr = ka->sa_restorer;
|
||||
} else {
|
||||
return_addr = default_rt_sigreturn;
|
||||
}
|
||||
env->xregs[0] = usig;
|
||||
env->xregs[29] = frame_addr + fr_ofs;
|
||||
env->xregs[30] = return_addr;
|
||||
|
|
|
|||
|
|
@ -6,8 +6,10 @@
|
|||
#ifndef AARCH64_TARGET_PRCTL_H
|
||||
#define AARCH64_TARGET_PRCTL_H
|
||||
|
||||
#include "qemu/units.h"
|
||||
#include "target/arm/cpu-features.h"
|
||||
#include "mte_user_helper.h"
|
||||
#include "gcs-internal.h"
|
||||
|
||||
static abi_long do_prctl_sve_get_vl(CPUArchState *env)
|
||||
{
|
||||
|
|
@ -206,4 +208,98 @@ static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
|
|||
}
|
||||
#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
|
||||
|
||||
static abi_long do_prctl_get_shadow_stack_status(CPUArchState *env,
|
||||
abi_long arg2)
|
||||
{
|
||||
ARMCPU *cpu = env_archcpu(env);
|
||||
|
||||
if (!cpu_isar_feature(aa64_gcs, cpu)) {
|
||||
return -TARGET_EINVAL;
|
||||
}
|
||||
return put_user_ual(gcs_get_el0_mode(env), arg2);
|
||||
}
|
||||
#define do_prctl_get_shadow_stack_status do_prctl_get_shadow_stack_status
|
||||
|
||||
static abi_long gcs_alloc(abi_ulong hint, abi_ulong size)
|
||||
{
|
||||
/*
|
||||
* Without softmmu, we cannot protect GCS memory properly.
|
||||
* Make do with normal read/write permissions. This at least allows
|
||||
* emulation of correct programs which don't access the gcs stack
|
||||
* with normal instructions.
|
||||
*/
|
||||
return target_mmap(hint, size, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS |
|
||||
(hint ? MAP_FIXED_NOREPLACE : 0), -1, 0);
|
||||
}
|
||||
|
||||
static abi_ulong gcs_new_stack(TaskState *ts)
|
||||
{
|
||||
/* Use guest_stack_size as a proxy for RLIMIT_STACK. */
|
||||
abi_ulong size = MIN(MAX(guest_stack_size / 2, TARGET_PAGE_SIZE), 2 * GiB);
|
||||
abi_ulong base = gcs_alloc(0, size);
|
||||
|
||||
if (base == -1) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ts->gcs_base = base;
|
||||
ts->gcs_size = size;
|
||||
return base + size - 8;
|
||||
}
|
||||
|
||||
static abi_long do_prctl_set_shadow_stack_status(CPUArchState *env,
|
||||
abi_long new_mode)
|
||||
{
|
||||
ARMCPU *cpu = env_archcpu(env);
|
||||
TaskState *ts = get_task_state(env_cpu(env));
|
||||
abi_long cur_mode;
|
||||
|
||||
if (!cpu_isar_feature(aa64_gcs, cpu)) {
|
||||
return -TARGET_EINVAL;
|
||||
}
|
||||
if (new_mode & ~(PR_SHADOW_STACK_ENABLE |
|
||||
PR_SHADOW_STACK_WRITE |
|
||||
PR_SHADOW_STACK_PUSH)) {
|
||||
return -TARGET_EINVAL;
|
||||
}
|
||||
|
||||
cur_mode = gcs_get_el0_mode(env);
|
||||
if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) {
|
||||
return -TARGET_EBUSY;
|
||||
}
|
||||
|
||||
if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) {
|
||||
abi_long gcspr;
|
||||
|
||||
if (ts->gcs_base || env->cp15.gcspr_el[0]) {
|
||||
return -EINVAL;
|
||||
}
|
||||
gcspr = gcs_new_stack(ts);
|
||||
if (gcspr == -1) {
|
||||
return -TARGET_ENOMEM;
|
||||
}
|
||||
env->cp15.gcspr_el[0] = gcspr;
|
||||
}
|
||||
|
||||
gcs_set_el0_mode(env, new_mode);
|
||||
arm_rebuild_hflags(env);
|
||||
return 0;
|
||||
}
|
||||
#define do_prctl_set_shadow_stack_status do_prctl_set_shadow_stack_status
|
||||
|
||||
static abi_long do_prctl_lock_shadow_stack_status(CPUArchState *env,
|
||||
abi_long arg2)
|
||||
{
|
||||
ARMCPU *cpu = env_archcpu(env);
|
||||
TaskState *ts = get_task_state(env_cpu(env));
|
||||
|
||||
if (!cpu_isar_feature(aa64_gcs, cpu)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
ts->gcs_el0_locked |= arg2;
|
||||
return 0;
|
||||
}
|
||||
#define do_prctl_lock_shadow_stack_status do_prctl_lock_shadow_stack_status
|
||||
|
||||
#endif /* AARCH64_TARGET_PRCTL_H */
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
|
||||
#define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
|
||||
#define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
|
||||
#define TARGET_SEGV_CPERR 10 /* Control protection fault */
|
||||
|
||||
#define TARGET_ARCH_HAS_SETUP_FRAME
|
||||
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
|
||||
|
|
|
|||
|
|
@ -121,6 +121,11 @@ struct TaskState {
|
|||
abi_ulong child_tidptr;
|
||||
#ifdef TARGET_M68K
|
||||
abi_ulong tp_value;
|
||||
#endif
|
||||
#if defined(TARGET_AARCH64)
|
||||
vaddr gcs_base;
|
||||
abi_ulong gcs_size;
|
||||
abi_ulong gcs_el0_locked;
|
||||
#endif
|
||||
int used; /* non zero if used */
|
||||
struct image_info *info;
|
||||
|
|
|
|||
|
|
@ -6353,6 +6353,17 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
|
|||
# define PR_SME_VL_LEN_MASK 0xffff
|
||||
# define PR_SME_VL_INHERIT (1 << 17)
|
||||
#endif
|
||||
#ifndef PR_GET_SHADOW_STACK_STATUS
|
||||
# define PR_GET_SHADOW_STACK_STATUS 74
|
||||
# define PR_SET_SHADOW_STACK_STATUS 75
|
||||
# define PR_LOCK_SHADOW_STACK_STATUS 76
|
||||
#endif
|
||||
#ifndef SHADOW_STACK_SET_TOKEN
|
||||
# define SHADOW_STACK_SET_TOKEN (1u << 0)
|
||||
#endif
|
||||
#ifndef SHADOW_STACK_SET_MARKER
|
||||
# define SHADOW_STACK_SET_MARKER (1u << 1)
|
||||
#endif
|
||||
|
||||
#include "target_prctl.h"
|
||||
|
||||
|
|
@ -6399,6 +6410,15 @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
|
|||
#ifndef do_prctl_sme_set_vl
|
||||
#define do_prctl_sme_set_vl do_prctl_inval1
|
||||
#endif
|
||||
#ifndef do_prctl_get_shadow_stack_status
|
||||
#define do_prctl_get_shadow_stack_status do_prctl_inval1
|
||||
#endif
|
||||
#ifndef do_prctl_set_shadow_stack_status
|
||||
#define do_prctl_set_shadow_stack_status do_prctl_inval1
|
||||
#endif
|
||||
#ifndef do_prctl_lock_shadow_stack_status
|
||||
#define do_prctl_lock_shadow_stack_status do_prctl_inval1
|
||||
#endif
|
||||
|
||||
static abi_long do_prctl_syscall_user_dispatch(CPUArchState *env,
|
||||
abi_ulong arg2, abi_ulong arg3,
|
||||
|
|
@ -6499,6 +6519,21 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
|
|||
return -TARGET_EINVAL;
|
||||
}
|
||||
return do_prctl_get_tagged_addr_ctrl(env);
|
||||
case PR_GET_SHADOW_STACK_STATUS:
|
||||
if (arg3 || arg4 || arg5) {
|
||||
return -TARGET_EINVAL;
|
||||
}
|
||||
return do_prctl_get_shadow_stack_status(env, arg2);
|
||||
case PR_SET_SHADOW_STACK_STATUS:
|
||||
if (arg3 || arg4 || arg5) {
|
||||
return -TARGET_EINVAL;
|
||||
}
|
||||
return do_prctl_set_shadow_stack_status(env, arg2);
|
||||
case PR_LOCK_SHADOW_STACK_STATUS:
|
||||
if (arg3 || arg4 || arg5) {
|
||||
return -TARGET_EINVAL;
|
||||
}
|
||||
return do_prctl_lock_shadow_stack_status(env, arg2);
|
||||
|
||||
case PR_GET_UNALIGN:
|
||||
return do_prctl_get_unalign(env, arg2);
|
||||
|
|
@ -6576,6 +6611,54 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef TARGET_AARCH64
|
||||
static abi_long do_map_shadow_stack(CPUArchState *env, abi_ulong addr,
|
||||
abi_ulong size, abi_int flags)
|
||||
{
|
||||
ARMCPU *cpu = env_archcpu(env);
|
||||
abi_ulong alloc_size;
|
||||
|
||||
if (!cpu_isar_feature(aa64_gcs, cpu)) {
|
||||
return -TARGET_EOPNOTSUPP;
|
||||
}
|
||||
if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER)) {
|
||||
return -TARGET_EINVAL;
|
||||
}
|
||||
if (addr & ~TARGET_PAGE_MASK) {
|
||||
return -TARGET_EINVAL;
|
||||
}
|
||||
if (size == 8 || !QEMU_IS_ALIGNED(size, 8)) {
|
||||
return -TARGET_EINVAL;
|
||||
}
|
||||
|
||||
alloc_size = TARGET_PAGE_ALIGN(size);
|
||||
if (alloc_size < size) {
|
||||
return -TARGET_EOVERFLOW;
|
||||
}
|
||||
|
||||
mmap_lock();
|
||||
addr = gcs_alloc(addr, alloc_size);
|
||||
if (addr != -1) {
|
||||
if (flags & SHADOW_STACK_SET_TOKEN) {
|
||||
abi_ptr cap_ptr = addr + size - 8;
|
||||
uint64_t cap_val;
|
||||
|
||||
if (flags & SHADOW_STACK_SET_MARKER) {
|
||||
/* Leave an extra empty frame at top-of-stack. */
|
||||
cap_ptr -= 8;
|
||||
}
|
||||
cap_val = (cap_ptr & TARGET_PAGE_MASK) | 1;
|
||||
if (put_user_u64(cap_val, cap_ptr)) {
|
||||
/* Allocation succeeded above. */
|
||||
g_assert_not_reached();
|
||||
}
|
||||
}
|
||||
}
|
||||
mmap_unlock();
|
||||
return get_errno(addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#define NEW_STACK_SIZE 0x40000
|
||||
|
||||
|
||||
|
|
@ -6657,6 +6740,21 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
|
|||
ts = g_new0(TaskState, 1);
|
||||
init_task_state(ts);
|
||||
|
||||
#ifdef TARGET_AARCH64
|
||||
/*
|
||||
* If GCS is enabled in the parent thread, it is also enabled
|
||||
* in the child thread, but with a newly allocated stack.
|
||||
*/
|
||||
abi_long new_gcspr = 0;
|
||||
if (env->cp15.gcscr_el[0] & GCSCR_PCRSEL) {
|
||||
new_gcspr = gcs_new_stack(ts);
|
||||
if (new_gcspr == -1) {
|
||||
g_free(ts);
|
||||
return -TARGET_ENOMEM;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Grab a mutex so that thread setup appears atomic. */
|
||||
pthread_mutex_lock(&clone_lock);
|
||||
|
||||
|
|
@ -6678,6 +6776,11 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
|
|||
ts->info = parent_ts->info;
|
||||
ts->signal_mask = parent_ts->signal_mask;
|
||||
|
||||
#ifdef TARGET_AARCH64
|
||||
ts->gcs_el0_locked = parent_ts->gcs_el0_locked;
|
||||
new_env->cp15.gcspr_el[0] = new_gcspr;
|
||||
#endif
|
||||
|
||||
if (flags & CLONE_CHILD_CLEARTID) {
|
||||
ts->child_tidptr = child_tidptr;
|
||||
}
|
||||
|
|
@ -9380,6 +9483,12 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
|
|||
FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
|
||||
}
|
||||
|
||||
#ifdef TARGET_AARCH64
|
||||
if (ts->gcs_base) {
|
||||
target_munmap(ts->gcs_base, ts->gcs_size);
|
||||
}
|
||||
#endif
|
||||
|
||||
object_unparent(OBJECT(cpu));
|
||||
object_unref(OBJECT(cpu));
|
||||
/*
|
||||
|
|
@ -14010,6 +14119,11 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
|
|||
return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
|
||||
#endif
|
||||
|
||||
#ifdef TARGET_AARCH64
|
||||
case TARGET_NR_map_shadow_stack:
|
||||
return do_map_shadow_stack(cpu_env, arg1, arg2, arg3);
|
||||
#endif
|
||||
|
||||
default:
|
||||
qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
|
||||
return -TARGET_ENOSYS;
|
||||
|
|
|
|||
target/arm/cpregs-gcs.c (new file, 156 lines)
@ -0,0 +1,156 @@
|
|||
/*
|
||||
* QEMU ARM CP Register GCS registers and instructions
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/timer.h"
|
||||
#include "exec/icount.h"
|
||||
#include "hw/irq.h"
|
||||
#include "cpu.h"
|
||||
#include "cpu-features.h"
|
||||
#include "cpregs.h"
|
||||
#include "internals.h"
|
||||
|
||||
|
||||
static CPAccessResult access_gcs(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
bool isread)
|
||||
{
|
||||
if (arm_current_el(env) < 3
|
||||
&& arm_feature(env, ARM_FEATURE_EL3)
|
||||
&& !(env->cp15.scr_el3 & SCR_GCSEN)) {
|
||||
return CP_ACCESS_TRAP_EL3;
|
||||
}
|
||||
return CP_ACCESS_OK;
|
||||
}
|
||||
|
||||
static CPAccessResult access_gcs_el0(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
bool isread)
|
||||
{
|
||||
if (arm_current_el(env) == 0 && !(env->cp15.gcscr_el[0] & GCSCRE0_NTR)) {
|
||||
return CP_ACCESS_TRAP_EL1;
|
||||
}
|
||||
return access_gcs(env, ri, isread);
|
||||
}
|
||||
|
||||
static void gcspr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
uint64_t value)
|
||||
{
|
||||
/*
|
||||
* Bits [2:0] are RES0, so we might as well clear them now,
|
||||
* rather than upon each usage a-la GetCurrentGCSPointer.
|
||||
*/
|
||||
raw_write(env, ri, value & ~7);
|
||||
}
|
||||
|
||||
static CPAccessResult access_gcspushm(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
bool isread)
|
||||
{
|
||||
int el = arm_current_el(env);
|
||||
if (!(env->cp15.gcscr_el[el] & GCSCR_PUSHMEN)) {
|
||||
return CP_ACCESS_TRAP_BIT | (el ? el : 1);
|
||||
}
|
||||
return CP_ACCESS_OK;
|
||||
}
|
||||
|
||||
static CPAccessResult access_gcspushx(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
bool isread)
|
||||
{
|
||||
/* Trap if lock taken, and enabled. */
|
||||
if (!(env->pstate & PSTATE_EXLOCK)) {
|
||||
int el = arm_current_el(env);
|
||||
if (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN) {
|
||||
return CP_ACCESS_EXLOCK;
|
||||
}
|
||||
}
|
||||
return CP_ACCESS_OK;
|
||||
}
|
||||
|
||||
static CPAccessResult access_gcspopcx(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
bool isread)
|
||||
{
|
||||
/* Trap if lock not taken, and enabled. */
|
||||
if (env->pstate & PSTATE_EXLOCK) {
|
||||
int el = arm_current_el(env);
|
||||
if (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN) {
|
||||
return CP_ACCESS_EXLOCK;
|
||||
}
|
||||
}
|
||||
return CP_ACCESS_OK;
|
||||
}
|
||||
|
||||
static const ARMCPRegInfo gcs_reginfo[] = {
|
||||
{ .name = "GCSCRE0_EL1", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 2, .crm = 5, .opc2 = 2,
|
||||
.access = PL1_RW, .accessfn = access_gcs, .fgt = FGT_NGCS_EL0,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[0]) },
|
||||
{ .name = "GCSCR_EL1", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 2, .crm = 5, .opc2 = 0,
|
||||
.access = PL1_RW, .accessfn = access_gcs, .fgt = FGT_NGCS_EL1,
|
||||
.nv2_redirect_offset = 0x8d0 | NV2_REDIR_NV1,
|
||||
.vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 5, 0),
|
||||
.vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 5, 0),
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[1]) },
|
||||
{ .name = "GCSCR_EL2", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 5, .opc2 = 0,
|
||||
.access = PL2_RW, .accessfn = access_gcs,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[2]) },
|
||||
{ .name = "GCSCR_EL3", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 5, .opc2 = 0,
|
||||
.access = PL3_RW,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[3]) },
|
||||
|
||||
{ .name = "GCSPR_EL0", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 3, .crn = 2, .crm = 5, .opc2 = 1,
|
||||
.access = PL0_R | PL1_W, .accessfn = access_gcs_el0,
|
||||
.fgt = FGT_NGCS_EL0, .writefn = gcspr_write,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[0]) },
|
||||
{ .name = "GCSPR_EL1", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 2, .crm = 5, .opc2 = 1,
|
||||
.access = PL1_RW, .accessfn = access_gcs,
|
||||
.fgt = FGT_NGCS_EL1, .writefn = gcspr_write,
|
||||
.nv2_redirect_offset = 0x8c0 | NV2_REDIR_NV1,
|
||||
.vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 5, 1),
|
||||
.vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 5, 1),
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[1]) },
|
||||
{ .name = "GCSPR_EL2", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 5, .opc2 = 1,
|
||||
.access = PL2_RW, .accessfn = access_gcs, .writefn = gcspr_write,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[2]) },
|
||||
{ .name = "GCSPR_EL3", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 5, .opc2 = 1,
|
||||
.access = PL3_RW, .writefn = gcspr_write,
|
||||
.fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[3]) },
|
||||
|
||||
{ .name = "GCSPUSHM", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 0,
|
||||
.access = PL0_W, .accessfn = access_gcspushm,
|
||||
.fgt = FGT_NGCSPUSHM_EL1, .type = ARM_CP_GCSPUSHM },
|
||||
{ .name = "GCSPOPM", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 1,
|
||||
.access = PL0_R, .type = ARM_CP_GCSPOPM },
|
||||
{ .name = "GCSSS1", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 2,
|
||||
.access = PL0_W, .type = ARM_CP_GCSSS1 },
|
||||
{ .name = "GCSSS2", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 3,
|
||||
.access = PL0_R, .type = ARM_CP_GCSSS2 },
|
||||
{ .name = "GCSPUSHX", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 7, .opc2 = 4,
|
||||
.access = PL1_W, .accessfn = access_gcspushx, .fgt = FGT_NGCSEPP,
|
||||
.type = ARM_CP_GCSPUSHX },
|
||||
{ .name = "GCSPOPCX", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 7, .opc2 = 5,
|
||||
.access = PL1_W, .accessfn = access_gcspopcx, .fgt = FGT_NGCSEPP,
|
||||
.type = ARM_CP_GCSPOPCX },
|
||||
{ .name = "GCSPOPX", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 7, .opc2 = 6,
|
||||
.access = PL1_W, .type = ARM_CP_GCSPOPX },
|
||||
};
|
||||
|
||||
void define_gcs_cpregs(ARMCPU *cpu)
|
||||
{
|
||||
if (cpu_isar_feature(aa64_gcs, cpu)) {
|
||||
define_arm_cp_regs(cpu, gcs_reginfo);
|
||||
}
|
||||
}
|
||||
|
|
@ -47,6 +47,14 @@ enum {
|
|||
ARM_CP_DC_ZVA = 0x0005,
|
||||
ARM_CP_DC_GVA = 0x0006,
|
||||
ARM_CP_DC_GZVA = 0x0007,
|
||||
/* Special: gcs instructions */
|
||||
ARM_CP_GCSPUSHM = 0x0008,
|
||||
ARM_CP_GCSPOPM = 0x0009,
|
||||
ARM_CP_GCSPUSHX = 0x000a,
|
||||
ARM_CP_GCSPOPX = 0x000b,
|
||||
ARM_CP_GCSPOPCX = 0x000c,
|
||||
ARM_CP_GCSSS1 = 0x000d,
|
||||
ARM_CP_GCSSS2 = 0x000e,
|
||||
|
||||
/* Flag: reads produce resetvalue; writes ignored. */
|
||||
ARM_CP_CONST = 1 << 4,
|
||||
|
|
@ -136,6 +144,11 @@ enum {
|
|||
* identically to the normal one, other than FGT trapping handling.)
|
||||
*/
|
||||
ARM_CP_ADD_TLBI_NXS = 1 << 21,
|
||||
/*
|
||||
* Flag: even though this sysreg has opc1 == 4 or 5, it
|
||||
* should not trap to EL2 when HCR_EL2.NV is set.
|
||||
*/
|
||||
ARM_CP_NV_NO_TRAP = 1 << 22,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
@ -351,6 +364,14 @@ typedef enum CPAccessResult {
|
|||
* specified target EL.
|
||||
*/
|
||||
CP_ACCESS_UNDEFINED = (2 << 2),
|
||||
|
||||
/*
|
||||
* Access fails with EXLOCK, a GCS exception syndrome.
|
||||
* These traps are always to the current execution EL,
|
||||
* which is the same as the usual target EL because
|
||||
* they cannot occur from EL0.
|
||||
*/
|
||||
CP_ACCESS_EXLOCK = (3 << 2),
|
||||
} CPAccessResult;
|
||||
|
||||
/* Indexes into fgt_read[] */
|
||||
|
|
@ -779,8 +800,12 @@ typedef enum FGTBit {
|
|||
DO_BIT(HFGRTR, VBAR_EL1),
|
||||
DO_BIT(HFGRTR, ICC_IGRPENN_EL1),
|
||||
DO_BIT(HFGRTR, ERRIDR_EL1),
|
||||
DO_REV_BIT(HFGRTR, NGCS_EL0),
|
||||
DO_REV_BIT(HFGRTR, NGCS_EL1),
|
||||
DO_REV_BIT(HFGRTR, NSMPRI_EL1),
|
||||
DO_REV_BIT(HFGRTR, NTPIDR2_EL0),
|
||||
DO_REV_BIT(HFGRTR, NPIRE0_EL1),
|
||||
DO_REV_BIT(HFGRTR, NPIR_EL1),
|
||||
|
||||
/* Trap bits in HDFGRTR_EL2 / HDFGWTR_EL2, starting from bit 0. */
|
||||
DO_BIT(HDFGRTR, DBGBCRN_EL1),
|
||||
|
|
@ -859,6 +884,8 @@ typedef enum FGTBit {
|
|||
DO_BIT(HFGITR, DVPRCTX),
|
||||
DO_BIT(HFGITR, CPPRCTX),
|
||||
DO_BIT(HFGITR, DCCVAC),
|
||||
DO_REV_BIT(HFGITR, NGCSPUSHM_EL1),
|
||||
DO_REV_BIT(HFGITR, NGCSEPP),
|
||||
DO_BIT(HFGITR, ATS1E1A),
|
||||
} FGTBit;
|
||||
|
||||
|
|
@ -1156,12 +1183,17 @@ static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri)
|
|||
* fragile to future new sysregs, but this seems the least likely
|
||||
* to break.
|
||||
*
|
||||
* In particular, note that the released sysreg XML defines that
|
||||
* the FEAT_MEC sysregs and instructions do not follow this FEAT_NV
|
||||
* trapping rule, so we will need to add an ARM_CP_* flag to indicate
|
||||
* "register does not trap on NV" to handle those if/when we implement
|
||||
* FEAT_MEC.
|
||||
* In particular, note that the FEAT_MEC sysregs and instructions
|
||||
* are exceptions to this trapping rule, so they are marked as
|
||||
* ARM_CP_NV_NO_TRAP to indicate that they should not be trapped
|
||||
* to EL2. (They are an exception because the FEAT_MEC sysregs UNDEF
|
||||
* unless in Realm, and Realm is not expected to be virtualized.)
|
||||
*/
|
||||
|
||||
if (ri->type & ARM_CP_NV_NO_TRAP) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return ri->opc1 == 4 || ri->opc1 == 5;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1149,6 +1149,11 @@ static inline bool isar_feature_aa64_nmi(const ARMISARegisters *id)
|
|||
return FIELD_EX64_IDREG(id, ID_AA64PFR1, NMI) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_gcs(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64_IDREG(id, ID_AA64PFR1, GCS) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN4) >= 1;
|
||||
|
|
@ -1349,6 +1354,21 @@ static inline bool isar_feature_aa64_sctlr2(const ARMISARegisters *id)
|
|||
return FIELD_EX64_IDREG(id, ID_AA64MMFR3, SCTLRX) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_s1pie(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64_IDREG(id, ID_AA64MMFR3, S1PIE) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_s2pie(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64_IDREG(id, ID_AA64MMFR3, S2PIE) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_mec(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64_IDREG(id, ID_AA64MMFR3, MEC) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 4 &&
|
||||
|
|
|
|||
|
|
@ -311,6 +311,10 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type)
|
|||
env->cp15.mdscr_el1 |= 1 << 12;
|
||||
/* Enable FEAT_MOPS */
|
||||
env->cp15.sctlr_el[1] |= SCTLR_MSCEN;
|
||||
/* For Linux, GCSPR_EL0 is always readable. */
|
||||
if (cpu_isar_feature(aa64_gcs, cpu)) {
|
||||
env->cp15.gcscr_el[0] = GCSCRE0_NTR;
|
||||
}
|
||||
#else
|
||||
/* Reset into the highest available EL */
|
||||
if (arm_feature(env, ARM_FEATURE_EL3)) {
|
||||
|
|
@ -635,12 +639,22 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
|
|||
if (cpu_isar_feature(aa64_fgt, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_FGTEN;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_gcs, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_GCSEN;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_tcr2, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_TCR2EN;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_sctlr2, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_SCTLR2EN;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_s1pie, cpu) ||
|
||||
cpu_isar_feature(aa64_s2pie, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_PIEN;
|
||||
}
|
||||
if (cpu_isar_feature(aa64_mec, cpu)) {
|
||||
env->cp15.scr_el3 |= SCR_MECEN;
|
||||
}
|
||||
}
|
||||
|
||||
if (target_el == 2) {
|
||||
|
|
@ -819,7 +833,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
|||
{
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
uint32_t psr = pstate_read(env);
|
||||
uint64_t psr = pstate_read(env);
|
||||
int i, j;
|
||||
int el = arm_current_el(env);
|
||||
uint64_t hcr = arm_hcr_el2_eff(env);
|
||||
|
|
@ -841,7 +855,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
|||
} else {
|
||||
ns_status = "";
|
||||
}
|
||||
qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
|
||||
qemu_fprintf(f, "PSTATE=%016" PRIx64 " %c%c%c%c %sEL%d%c",
|
||||
psr,
|
||||
psr & PSTATE_N ? 'N' : '-',
|
||||
psr & PSTATE_Z ? 'Z' : '-',
|
||||
|
|
@ -858,7 +872,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
|||
(FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
|
||||
}
|
||||
if (cpu_isar_feature(aa64_bti, cpu)) {
|
||||
qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
|
||||
qemu_fprintf(f, " BTYPE=%d", (int)(psr & PSTATE_BTYPE) >> 10);
|
||||
}
|
||||
qemu_fprintf(f, "%s%s%s",
|
||||
(hcr & HCR_NV) ? " NV" : "",
|
||||
|
|
|
|||
target/arm/cpu.h (253 lines changed)
|
|
@ -33,6 +33,7 @@
|
|||
#include "target/arm/multiprocessing.h"
|
||||
#include "target/arm/gtimer.h"
|
||||
#include "target/arm/cpu-sysregs.h"
|
||||
#include "target/arm/mmuidx.h"
|
||||
|
||||
#define EXCP_UDEF 1 /* undefined instruction */
|
||||
#define EXCP_SWI 2 /* software interrupt */
|
||||
|
|
@ -267,7 +268,7 @@ typedef struct CPUArchState {
|
|||
uint64_t xregs[32];
|
||||
uint64_t pc;
|
||||
/* PSTATE isn't an architectural register for ARMv8. However, it is
|
||||
* convenient for us to assemble the underlying state into a 32 bit format
|
||||
* convenient for us to assemble the underlying state into a 64 bit format
|
||||
* identical to the architectural format used for the SPSR. (This is also
|
||||
* what the Linux kernel's 'pstate' field in signal handlers and KVM's
|
||||
* 'pstate' register are.) Of the PSTATE bits:
|
||||
|
|
@ -279,7 +280,7 @@ typedef struct CPUArchState {
|
|||
* SM and ZA are kept in env->svcr
|
||||
* all other bits are stored in their correct places in env->pstate
|
||||
*/
|
||||
uint32_t pstate;
|
||||
uint64_t pstate;
|
||||
bool aarch64; /* True if CPU is in aarch64 state; inverse of PSTATE.nRW */
|
||||
bool thumb; /* True if CPU is in thumb mode; cpsr[5] */
|
||||
|
||||
|
|
@ -368,6 +369,9 @@ typedef struct CPUArchState {
|
|||
uint64_t tcr2_el[3];
|
||||
uint64_t vtcr_el2; /* Virtualization Translation Control. */
|
||||
uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */
|
||||
uint64_t pir_el[4]; /* PIRE0_EL1, PIR_EL1, PIR_EL2, PIR_EL3 */
|
||||
uint64_t pire0_el2;
|
||||
uint64_t s2pir_el2;
|
||||
uint32_t c2_data; /* MPU data cacheable bits. */
|
||||
uint32_t c2_insn; /* MPU instruction cacheable bits. */
|
||||
union { /* MMU domain access control register
|
||||
|
|
@ -576,6 +580,18 @@ typedef struct CPUArchState {
|
|||
|
||||
/* NV2 register */
|
||||
uint64_t vncr_el2;
|
||||
|
||||
uint64_t gcscr_el[4]; /* GCSCRE0_EL1, GCSCR_EL[123] */
|
||||
uint64_t gcspr_el[4]; /* GCSPR_EL[0123] */
|
||||
|
||||
/* MEC registers */
|
||||
uint64_t mecid_p0_el2;
|
||||
uint64_t mecid_a0_el2;
|
||||
uint64_t mecid_p1_el2;
|
||||
uint64_t mecid_a1_el2;
|
||||
uint64_t mecid_rl_a_el3;
|
||||
uint64_t vmecid_p_el2;
|
||||
uint64_t vmecid_a_el2;
|
||||
} cp15;
|
||||
|
||||
struct {
|
||||
|
|
@ -630,13 +646,10 @@ typedef struct CPUArchState {
|
|||
* entry process.
|
||||
*/
|
||||
struct {
|
||||
uint32_t syndrome; /* AArch64 format syndrome register */
|
||||
uint32_t fsr; /* AArch32 format fault status register info */
|
||||
uint64_t syndrome; /* AArch64 format syndrome register */
|
||||
uint64_t vaddress; /* virtual addr associated with exception, if any */
|
||||
uint32_t fsr; /* AArch32 format fault status register info */
|
||||
uint32_t target_el; /* EL the exception should be targeted for */
|
||||
/* If we implement EL2 we will also need to store information
|
||||
* about the intermediate physical address for stage 2 faults.
|
||||
*/
|
||||
} exception;
|
||||
|
||||
/* Information associated with an SError */
|
||||
|
|
@ -1498,6 +1511,7 @@ void pmu_init(ARMCPU *cpu);
|
|||
#define PSTATE_C (1U << 29)
|
||||
#define PSTATE_Z (1U << 30)
|
||||
#define PSTATE_N (1U << 31)
|
||||
#define PSTATE_EXLOCK (1ULL << 34)
|
||||
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
|
||||
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
|
||||
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE)
|
||||
|
|
@ -1534,7 +1548,7 @@ static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
|
|||
* interprocessing, so we don't attempt to sync with the cpsr state used by
|
||||
* the 32 bit decoder.
|
||||
*/
|
||||
static inline uint32_t pstate_read(CPUARMState *env)
|
||||
static inline uint64_t pstate_read(CPUARMState *env)
|
||||
{
|
||||
int ZF;
|
||||
|
||||
|
|
@ -1544,7 +1558,7 @@ static inline uint32_t pstate_read(CPUARMState *env)
|
|||
| env->pstate | env->daif | (env->btype << 10);
|
||||
}
|
||||
|
||||
static inline void pstate_write(CPUARMState *env, uint32_t val)
|
||||
static inline void pstate_write(CPUARMState *env, uint64_t val)
|
||||
{
|
||||
env->ZF = (~val) & PSTATE_Z;
|
||||
env->NF = val;
|
||||
|
|
@ -1716,13 +1730,24 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
|
|||
#define SCR_ENAS0 (1ULL << 36)
|
||||
#define SCR_ADEN (1ULL << 37)
|
||||
#define SCR_HXEN (1ULL << 38)
|
||||
#define SCR_GCSEN (1ULL << 39)
|
||||
#define SCR_TRNDR (1ULL << 40)
|
||||
#define SCR_ENTP2 (1ULL << 41)
|
||||
#define SCR_TCR2EN (1ULL << 43)
|
||||
#define SCR_SCTLR2EN (1ULL << 44)
|
||||
#define SCR_PIEN (1ULL << 45)
|
||||
#define SCR_GPF (1ULL << 48)
|
||||
#define SCR_MECEN (1ULL << 49)
|
||||
#define SCR_NSE (1ULL << 62)
|
||||
|
||||
/* GCSCR_ELx fields */
|
||||
#define GCSCR_PCRSEL (1ULL << 0)
|
||||
#define GCSCR_RVCHKEN (1ULL << 5)
|
||||
#define GCSCR_EXLOCKEN (1ULL << 6)
|
||||
#define GCSCR_PUSHMEN (1ULL << 8)
|
||||
#define GCSCR_STREN (1ULL << 9)
|
||||
#define GCSCRE0_NTR (1ULL << 10)
|
||||
|
||||
/* Return the current FPSCR value. */
|
||||
uint32_t vfp_get_fpscr(CPUARMState *env);
|
||||
void vfp_set_fpscr(CPUARMState *env, uint32_t val);
|
||||
|
|
@ -2221,6 +2246,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
|
|||
*/
|
||||
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space);
|
||||
uint64_t arm_hcr_el2_eff(CPUARMState *env);
|
||||
uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env);
|
||||
uint64_t arm_hcrx_el2_eff(CPUARMState *env);
|
||||
|
||||
/*
|
||||
|
|
@ -2300,212 +2326,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
|
|||
|
||||
#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
|
||||
|
||||
/* ARM has the following "translation regimes" (as the ARM ARM calls them):
|
||||
*
|
||||
* If EL3 is 64-bit:
|
||||
* + NonSecure EL1 & 0 stage 1
|
||||
* + NonSecure EL1 & 0 stage 2
|
||||
* + NonSecure EL2
|
||||
* + NonSecure EL2 & 0 (ARMv8.1-VHE)
|
||||
* + Secure EL1 & 0 stage 1
|
||||
* + Secure EL1 & 0 stage 2 (FEAT_SEL2)
|
||||
* + Secure EL2 (FEAT_SEL2)
|
||||
* + Secure EL2 & 0 (FEAT_SEL2)
|
||||
* + Realm EL1 & 0 stage 1 (FEAT_RME)
|
||||
* + Realm EL1 & 0 stage 2 (FEAT_RME)
|
||||
* + Realm EL2 (FEAT_RME)
|
||||
* + EL3
|
||||
* If EL3 is 32-bit:
|
||||
* + NonSecure PL1 & 0 stage 1
|
||||
* + NonSecure PL1 & 0 stage 2
|
||||
* + NonSecure PL2
|
||||
* + Secure PL1 & 0
|
||||
* (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
|
||||
*
|
||||
* For QEMU, an mmu_idx is not quite the same as a translation regime because:
|
||||
* 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
|
||||
* because they may differ in access permissions even if the VA->PA map is
|
||||
* the same
|
||||
* 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
|
||||
* translation, which means that we have one mmu_idx that deals with two
|
||||
* concatenated translation regimes [this sort of combined s1+2 TLB is
|
||||
* architecturally permitted]
|
||||
* 3. we don't need to allocate an mmu_idx to translations that we won't be
|
||||
* handling via the TLB. The only way to do a stage 1 translation without
|
||||
* the immediate stage 2 translation is via the ATS or AT system insns,
|
||||
* which can be slow-pathed and always do a page table walk.
|
||||
* The only use of stage 2 translations is either as part of an s1+2
|
||||
* lookup or when loading the descriptors during a stage 1 page table walk,
|
||||
* and in both those cases we don't use the TLB.
|
||||
* 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
|
||||
* translation regimes, because they map reasonably well to each other
|
||||
* and they can't both be active at the same time.
|
||||
* 5. we want to be able to use the TLB for accesses done as part of a
|
||||
* stage1 page table walk, rather than having to walk the stage2 page
|
||||
* table over and over.
|
||||
* 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
|
||||
* Never (PAN) bit within PSTATE.
|
||||
* 7. we fold together most secure and non-secure regimes for A-profile,
|
||||
* because there are no banked system registers for aarch64, so the
|
||||
* process of switching between secure and non-secure is
|
||||
* already heavyweight.
|
||||
* 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
|
||||
* because both are in use simultaneously for Secure EL2.
|
||||
*
|
||||
* This gives us the following list of cases:
|
||||
*
|
||||
* EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
|
||||
* EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
|
||||
* EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN)
|
||||
* EL0 EL2&0
|
||||
* EL2 EL2&0
|
||||
* EL2 EL2&0 +PAN
|
||||
* EL2 (aka NS PL2)
|
||||
* EL3 (aka AArch32 S PL1 PL1&0)
|
||||
* AArch32 S PL0 PL1&0 (we call this EL30_0)
|
||||
* AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
|
||||
* Stage2 Secure
|
||||
* Stage2 NonSecure
|
||||
* plus one TLB per Physical address space: S, NS, Realm, Root
|
||||
*
|
||||
* for a total of 16 different mmu_idx.
|
||||
*
|
||||
* R profile CPUs have an MPU, but can use the same set of MMU indexes
|
||||
* as A profile. They only need to distinguish EL0 and EL1 (and
|
||||
* EL2 for cores like the Cortex-R52).
|
||||
*
|
||||
* M profile CPUs are rather different as they do not have a true MMU.
|
||||
* They have the following different MMU indexes:
|
||||
* User
|
||||
* Privileged
|
||||
* User, execution priority negative (ie the MPU HFNMIENA bit may apply)
|
||||
* Privileged, execution priority negative (ditto)
|
||||
* If the CPU supports the v8M Security Extension then there are also:
|
||||
* Secure User
|
||||
* Secure Privileged
|
||||
* Secure User, execution priority negative
|
||||
* Secure Privileged, execution priority negative
|
||||
*
|
||||
* The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
|
||||
* are not quite the same -- different CPU types (most notably M profile
|
||||
* vs A/R profile) would like to use MMU indexes with different semantics,
|
||||
* but since we don't ever need to use all of those in a single CPU we
|
||||
* can avoid having to set NB_MMU_MODES to "total number of A profile MMU
|
||||
* modes + total number of M profile MMU modes". The lower bits of
|
||||
* ARMMMUIdx are the core TLB mmu index, and the higher bits are always
|
||||
* the same for any particular CPU.
|
||||
* Variables of type ARMMUIdx are always full values, and the core
|
||||
* index values are in variables of type 'int'.
|
||||
*
|
||||
* Our enumeration includes at the end some entries which are not "true"
|
||||
* mmu_idx values in that they don't have corresponding TLBs and are only
|
||||
* valid for doing slow path page table walks.
|
||||
*
|
||||
* The constant names here are patterned after the general style of the names
|
||||
* of the AT/ATS operations.
|
||||
* The values used are carefully arranged to make mmu_idx => EL lookup easy.
|
||||
* For M profile we arrange them to have a bit for priv, a bit for negpri
|
||||
* and a bit for secure.
|
||||
*/
|
||||
#define ARM_MMU_IDX_A 0x10 /* A profile */
|
||||
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
|
||||
#define ARM_MMU_IDX_M 0x40 /* M profile */
|
||||
|
||||
/* Meanings of the bits for M profile mmu idx values */
|
||||
#define ARM_MMU_IDX_M_PRIV 0x1
|
||||
#define ARM_MMU_IDX_M_NEGPRI 0x2
|
||||
#define ARM_MMU_IDX_M_S 0x4 /* Secure */
|
||||
|
||||
#define ARM_MMU_IDX_TYPE_MASK \
|
||||
(ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
|
||||
#define ARM_MMU_IDX_COREIDX_MASK 0xf
|
||||
|
||||
typedef enum ARMMMUIdx {
|
||||
/*
|
||||
* A-profile.
|
||||
*/
|
||||
ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A,
|
||||
|
||||
/*
|
||||
* Used for second stage of an S12 page table walk, or for descriptor
|
||||
* loads during first stage of an S1 page table walk. Note that both
|
||||
* are in use simultaneously for SecureEL2: the security state for
|
||||
* the S2 ptw is selected by the NS bit from the S1 ptw.
|
||||
*/
|
||||
ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
|
||||
|
||||
/* TLBs with 1-1 mapping to the physical address spaces. */
|
||||
ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A,
|
||||
ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A,
|
||||
|
||||
/*
|
||||
* These are not allocated TLBs and are used only for AT system
|
||||
* instructions or for the first stage of an S12 page table walk.
|
||||
*/
|
||||
ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
|
||||
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
|
||||
ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
|
||||
|
||||
/*
|
||||
* M-profile.
|
||||
*/
|
||||
ARMMMUIdx_MUser = ARM_MMU_IDX_M,
|
||||
ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
|
||||
ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
|
||||
ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
|
||||
ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
|
||||
ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
|
||||
ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
|
||||
ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
|
||||
} ARMMMUIdx;
|
||||
|
||||
/*
|
||||
* Bit macros for the core-mmu-index values for each index,
|
||||
* for use when calling tlb_flush_by_mmuidx() and friends.
|
||||
*/
|
||||
#define TO_CORE_BIT(NAME) \
|
||||
ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)
|
||||
|
||||
typedef enum ARMMMUIdxBit {
|
||||
TO_CORE_BIT(E10_0),
|
||||
TO_CORE_BIT(E20_0),
|
||||
TO_CORE_BIT(E10_1),
|
||||
TO_CORE_BIT(E10_1_PAN),
|
||||
TO_CORE_BIT(E2),
|
||||
TO_CORE_BIT(E20_2),
|
||||
TO_CORE_BIT(E20_2_PAN),
|
||||
TO_CORE_BIT(E3),
|
||||
TO_CORE_BIT(E30_0),
|
||||
TO_CORE_BIT(E30_3_PAN),
|
||||
TO_CORE_BIT(Stage2),
|
||||
TO_CORE_BIT(Stage2_S),
|
||||
|
||||
TO_CORE_BIT(MUser),
|
||||
TO_CORE_BIT(MPriv),
|
||||
TO_CORE_BIT(MUserNegPri),
|
||||
TO_CORE_BIT(MPrivNegPri),
|
||||
TO_CORE_BIT(MSUser),
|
||||
TO_CORE_BIT(MSPriv),
|
||||
TO_CORE_BIT(MSUserNegPri),
|
||||
TO_CORE_BIT(MSPrivNegPri),
|
||||
} ARMMMUIdxBit;
|
||||
|
||||
#undef TO_CORE_BIT
|
||||
|
||||
#define MMU_USER_IDX 0
|
||||
|
||||
/* Indexes used when registering address spaces with cpu_address_space_init */
|
||||
typedef enum ARMASIdx {
|
||||
ARMASIdx_NS = 0,
|
||||
|
|
@ -2667,6 +2487,9 @@ FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1)
|
|||
FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */
|
||||
FIELD(TBFLAG_A64, NEP, 38, 1) /* FPCR.NEP */
|
||||
FIELD(TBFLAG_A64, ZT0EXC_EL, 39, 2)
|
||||
FIELD(TBFLAG_A64, GCS_EN, 41, 1)
|
||||
FIELD(TBFLAG_A64, GCS_RVCEN, 42, 1)
|
||||
FIELD(TBFLAG_A64, GCSSTR_EL, 43, 2)
|
||||
|
||||
/*
|
||||
* Helpers for using the above. Note that only the A64 accessors use
|
||||
|
|
|
|||
|
|
@ -47,6 +47,7 @@ int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
|
|||
case 32:
|
||||
return gdb_get_reg64(mem_buf, env->pc);
|
||||
case 33:
|
||||
/* pstate is now a 64-bit value; can we simply adjust the xml? */
|
||||
return gdb_get_reg32(mem_buf, pstate_read(env));
|
||||
}
|
||||
/* Unknown register. */
|
||||
|
|
@ -75,6 +76,7 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
|
|||
return 8;
|
||||
case 33:
|
||||
/* CPSR */
|
||||
/* pstate is now a 64-bit value; can we simply adjust the xml? */
|
||||
pstate_write(env, tmp);
|
||||
return 4;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -420,7 +420,9 @@ int alle1_tlbmask(CPUARMState *env)
|
|||
*/
|
||||
return (ARMMMUIdxBit_E10_1 |
|
||||
ARMMMUIdxBit_E10_1_PAN |
|
||||
ARMMMUIdxBit_E10_1_GCS |
|
||||
ARMMMUIdxBit_E10_0 |
|
||||
ARMMMUIdxBit_E10_0_GCS |
|
||||
ARMMMUIdxBit_Stage2 |
|
||||
ARMMMUIdxBit_Stage2_S);
|
||||
}
|
||||
|
|
@@ -764,12 +766,22 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
if (cpu_isar_feature(aa64_ecv, cpu)) {
    valid_mask |= SCR_ECVEN;
}
if (cpu_isar_feature(aa64_gcs, cpu)) {
    valid_mask |= SCR_GCSEN;
}
if (cpu_isar_feature(aa64_tcr2, cpu)) {
    valid_mask |= SCR_TCR2EN;
}
if (cpu_isar_feature(aa64_sctlr2, cpu)) {
    valid_mask |= SCR_SCTLR2EN;
}
if (cpu_isar_feature(aa64_s1pie, cpu) ||
    cpu_isar_feature(aa64_s2pie, cpu)) {
    valid_mask |= SCR_PIEN;
}
if (cpu_isar_feature(aa64_mec, cpu)) {
    valid_mask |= SCR_MECEN;
}
} else {
    valid_mask &= ~(SCR_RW | SCR_ST);
    if (cpu_isar_feature(aa32_ras, cpu)) {

@@ -804,12 +816,17 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 */
if (changed & (SCR_NS | SCR_NSE)) {
    tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
                                       ARMMMUIdxBit_E10_0_GCS |
                                       ARMMMUIdxBit_E20_0 |
                                       ARMMMUIdxBit_E20_0_GCS |
                                       ARMMMUIdxBit_E10_1 |
                                       ARMMMUIdxBit_E20_2 |
                                       ARMMMUIdxBit_E10_1_PAN |
                                       ARMMMUIdxBit_E10_1_GCS |
                                       ARMMMUIdxBit_E20_2 |
                                       ARMMMUIdxBit_E20_2_PAN |
                                       ARMMMUIdxBit_E2));
                                       ARMMMUIdxBit_E20_2_GCS |
                                       ARMMMUIdxBit_E2 |
                                       ARMMMUIdxBit_E2_GCS));
}
}

@@ -2783,7 +2800,9 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
    (arm_hcr_el2_eff(env) & HCR_E2H)) {
    uint16_t mask = ARMMMUIdxBit_E20_2 |
                    ARMMMUIdxBit_E20_2_PAN |
                    ARMMMUIdxBit_E20_0;
                    ARMMMUIdxBit_E20_2_GCS |
                    ARMMMUIdxBit_E20_0 |
                    ARMMMUIdxBit_E20_0_GCS;
    tlb_flush_by_mmuidx(env_cpu(env), mask);
}
raw_write(env, ri, value);
@@ -3407,15 +3426,71 @@ static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
    }
}

static CPAccessResult access_nv1_with_nvx(uint64_t hcr_nv)
{
    return hcr_nv == (HCR_NV | HCR_NV1) ? CP_ACCESS_TRAP_EL2 : CP_ACCESS_OK;
}

static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2);
        return access_nv1_with_nvx(arm_hcr_el2_nvx_eff(env));
    }
    return CP_ACCESS_OK;
}

        if (hcr_nv == (HCR_NV | HCR_NV1)) {
            return CP_ACCESS_TRAP_EL2;
static CPAccessResult access_nv1_or_exlock_el1(CPUARMState *env,
                                               const ARMCPRegInfo *ri,
                                               bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t nvx = arm_hcr_el2_nvx_eff(env);

        if (!isread &&
            (env->pstate & PSTATE_EXLOCK) &&
            (env->cp15.gcscr_el[1] & GCSCR_EXLOCKEN) &&
            !(nvx & HCR_NV1)) {
            return CP_ACCESS_EXLOCK;
        }
        return access_nv1_with_nvx(nvx);
    }

    /*
     * At EL2, since VHE redirection is done at translation time,
     * el_is_in_host is always false here, so EXLOCK does not apply.
     */
    return CP_ACCESS_OK;
}

static CPAccessResult access_exlock_el2(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el == 3) {
        return CP_ACCESS_OK;
    }

    /*
     * Access to the EL2 register from EL1 means NV is set, and
     * EXLOCK has priority over an NV1 trap to EL2.
     */
    if (!isread &&
        (env->pstate & PSTATE_EXLOCK) &&
        (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN)) {
        return CP_ACCESS_EXLOCK;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_exlock_el3(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
{
    if (!isread &&
        (env->pstate & PSTATE_EXLOCK) &&
        (env->cp15.gcscr_el[3] & GCSCR_EXLOCKEN)) {
        return CP_ACCESS_EXLOCK;
    }
    return CP_ACCESS_OK;
}
@@ -3591,7 +3666,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
  .type = ARM_CP_ALIAS,
  .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
  .access = PL1_RW, .accessfn = access_nv1,
  .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
  .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
  .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 1),
  .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 1),

@@ -3599,7 +3674,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
  .type = ARM_CP_ALIAS,
  .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
  .access = PL1_RW, .accessfn = access_nv1,
  .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
  .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
  .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 0),
  .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 0),
@@ -3888,6 +3963,16 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
    return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
}

uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env)
{
    uint64_t hcr = arm_hcr_el2_eff(env);

    if (!(hcr & HCR_NV)) {
        return 0; /* CONSTRAINED UNPREDICTABLE wrt NV1 */
    }
    return hcr & (HCR_NV2 | HCR_NV1 | HCR_NV);
}

/*
 * Corresponds to ARM pseudocode function ELIsInHost().
 */
@@ -3940,6 +4025,9 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (cpu_isar_feature(aa64_sctlr2, cpu)) {
    valid_mask |= HCRX_SCTLR2EN;
}
if (cpu_isar_feature(aa64_gcs, cpu)) {
    valid_mask |= HCRX_GCSEN;
}

/* Clear RES0 bits. */
env->cp15.hcrx_el2 = value & valid_mask;

@@ -4010,6 +4098,9 @@ uint64_t arm_hcrx_el2_eff(CPUARMState *env)
if (cpu_isar_feature(aa64_sctlr2, cpu)) {
    hcrx |= HCRX_SCTLR2EN;
}
if (cpu_isar_feature(aa64_gcs, cpu)) {
    hcrx |= HCRX_GCSEN;
}
return hcrx;
}
if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
@@ -4067,7 +4158,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
  .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
  .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
  .access = PL2_RW,
  .access = PL2_RW, .accessfn = access_exlock_el2,
  .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
{ .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
  .type = ARM_CP_NV2_REDIRECT,

@@ -4085,7 +4176,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
  .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
  .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
  .access = PL2_RW,
  .access = PL2_RW, .accessfn = access_exlock_el2,
  .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
{ .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
  .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,

@@ -4367,7 +4458,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
  .type = ARM_CP_ALIAS,
  .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
  .access = PL3_RW,
  .access = PL3_RW, .accessfn = access_exlock_el3,
  .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
{ .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
  .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,

@@ -4378,7 +4469,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
  .type = ARM_CP_ALIAS,
  .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
  .access = PL3_RW,
  .access = PL3_RW, .accessfn = access_exlock_el3,
  .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
{ .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
  .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
@@ -5000,6 +5091,96 @@ static const ARMCPRegInfo nmi_reginfo[] = {
  .resetfn = arm_cp_reset_ignore },
};

static CPAccessResult mecid_access(CPUARMState *env,
                                   const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el == 2) {
        if (arm_security_space(env) != ARMSS_Realm) {
            return CP_ACCESS_UNDEFINED;
        }

        if (!(env->cp15.scr_el3 & SCR_MECEN)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static void mecid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value = extract64(value, 0, MECID_WIDTH);
    raw_write(env, ri, value);
}

static CPAccessResult cipae_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    switch (arm_security_space(env)) {
    case ARMSS_Root:  /* EL3 */
    case ARMSS_Realm: /* Realm EL2 */
        return CP_ACCESS_OK;
    default:
        return CP_ACCESS_UNDEFINED;
    }
}

static const ARMCPRegInfo mec_reginfo[] = {
    { .name = "MECIDR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 7, .crn = 10, .crm = 8,
      .access = PL2_R, .type = ARM_CP_CONST | ARM_CP_NV_NO_TRAP,
      .resetvalue = MECID_WIDTH - 1 },
    { .name = "MECID_P0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 8,
      .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
      .accessfn = mecid_access, .writefn = mecid_write,
      .fieldoffset = offsetof(CPUARMState, cp15.mecid_p0_el2) },
    { .name = "MECID_A0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 8,
      .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
      .accessfn = mecid_access, .writefn = mecid_write,
      .fieldoffset = offsetof(CPUARMState, cp15.mecid_a0_el2) },
    { .name = "MECID_P1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 8,
      .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
      .accessfn = mecid_access, .writefn = mecid_write,
      .fieldoffset = offsetof(CPUARMState, cp15.mecid_p1_el2) },
    { .name = "MECID_A1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 8,
      .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
      .accessfn = mecid_access, .writefn = mecid_write,
      .fieldoffset = offsetof(CPUARMState, cp15.mecid_a1_el2) },
    { .name = "MECID_RL_A_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .opc2 = 1, .crn = 10, .crm = 10,
      .access = PL3_RW, .accessfn = mecid_access,
      .writefn = mecid_write,
      .fieldoffset = offsetof(CPUARMState, cp15.mecid_rl_a_el3) },
    { .name = "VMECID_P_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 9,
      .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
      .accessfn = mecid_access, .writefn = mecid_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vmecid_p_el2) },
    { .name = "VMECID_A_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 9,
      .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
      .accessfn = mecid_access, .writefn = mecid_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vmecid_a_el2) },
    { .name = "DC_CIPAE", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
      .accessfn = cipae_access },
};

static const ARMCPRegInfo mec_mte_reginfo[] = {
    { .name = "DC_CIGDPAE", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
      .accessfn = cipae_access },
};

#ifndef CONFIG_USER_ONLY
/*
 * We don't know until after realize whether there's a GICv3
@@ -5842,6 +6023,9 @@ static void sctlr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
    uint64_t valid_mask = 0;

    if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
        valid_mask |= SCTLR2_EMEC;
    }
    value &= valid_mask;
    raw_write(env, ri, value);
}

@@ -5851,6 +6035,9 @@ static void sctlr2_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
    uint64_t valid_mask = 0;

    if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
        valid_mask |= SCTLR2_EMEC;
    }
    value &= valid_mask;
    raw_write(env, ri, value);
}
@@ -5902,8 +6089,12 @@ static CPAccessResult tcr2_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
static void tcr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t valid_mask = 0;

    if (cpu_isar_feature(aa64_s1pie, cpu)) {
        valid_mask |= TCR2_PIE;
    }
    value &= valid_mask;
    raw_write(env, ri, value);
}

@@ -5911,8 +6102,15 @@ static void tcr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tcr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t valid_mask = 0;

    if (cpu_isar_feature(aa64_s1pie, cpu)) {
        valid_mask |= TCR2_PIE;
    }
    if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
        valid_mask |= TCR2_AMEC0 | TCR2_AMEC1;
    }
    value &= valid_mask;
    raw_write(env, ri, value);
}
@@ -5933,6 +6131,64 @@ static const ARMCPRegInfo tcr2_reginfo[] = {
  .fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[2]) },
};

static CPAccessResult pien_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_PIEN)
        && arm_current_el(env) < 3) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult pien_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    CPAccessResult ret = access_tvm_trvm(env, ri, isread);
    if (ret == CP_ACCESS_OK) {
        ret = pien_access(env, ri, isread);
    }
    return ret;
}

static const ARMCPRegInfo s1pie_reginfo[] = {
    { .name = "PIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 10, .crm = 2,
      .access = PL1_RW, .accessfn = pien_el1_access,
      .fgt = FGT_NPIR_EL1, .nv2_redirect_offset = 0x2a0 | NV2_REDIR_NV1,
      .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 3),
      .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 3),
      .fieldoffset = offsetof(CPUARMState, cp15.pir_el[1]) },
    { .name = "PIR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 2,
      .access = PL2_RW, .accessfn = pien_access,
      .fieldoffset = offsetof(CPUARMState, cp15.pir_el[2]) },
    { .name = "PIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 10, .crm = 2,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pir_el[3]) },
    { .name = "PIRE0_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 10, .crm = 2,
      .access = PL1_RW, .accessfn = pien_el1_access,
      .fgt = FGT_NPIRE0_EL1, .nv2_redirect_offset = 0x290 | NV2_REDIR_NV1,
      .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 2),
      .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 2),
      .fieldoffset = offsetof(CPUARMState, cp15.pir_el[0]) },
    { .name = "PIRE0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 2,
      .access = PL2_RW, .accessfn = pien_access,
      .fieldoffset = offsetof(CPUARMState, cp15.pire0_el2) },
};

static const ARMCPRegInfo s2pie_reginfo[] = {
    { .name = "S2PIR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .opc2 = 5, .crn = 10, .crm = 2,
      .access = PL2_RW, .accessfn = pien_access,
      .nv2_redirect_offset = 0x2b0,
      .fieldoffset = offsetof(CPUARMState, cp15.s2pir_el2) },
};

void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
@@ -7165,6 +7421,19 @@ void register_cp_regs_for_features(ARMCPU *cpu)
    define_arm_cp_regs(cpu, tcr2_reginfo);
}

if (cpu_isar_feature(aa64_s1pie, cpu)) {
    define_arm_cp_regs(cpu, s1pie_reginfo);
}
if (cpu_isar_feature(aa64_s2pie, cpu)) {
    define_arm_cp_regs(cpu, s2pie_reginfo);
}
if (cpu_isar_feature(aa64_mec, cpu)) {
    define_arm_cp_regs(cpu, mec_reginfo);
    if (cpu_isar_feature(aa64_mte, cpu)) {
        define_arm_cp_regs(cpu, mec_mte_reginfo);
    }
}

if (cpu_isar_feature(any_predinv, cpu)) {
    define_arm_cp_regs(cpu, predinv_reginfo);
}

@@ -7174,6 +7443,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}

define_pm_cpregs(cpu);
define_gcs_cpregs(cpu);
}

/*
@@ -8800,7 +9070,7 @@ static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
    }
}

static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
    uint32_t ret = cpsr_read(env);

@@ -8815,6 +9085,24 @@ static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
    return ret;
}

void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    /* Save SPSR_ELx.SS into PSTATE. */
    env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
    val &= ~PSTATE_SS;

    /* Move DIT to the correct location for CPSR */
    if (val & PSTATE_DIT) {
        val &= ~PSTATE_DIT;
        val |= CPSR_DIT;
    }

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteRaw);
}

static bool syndrome_is_sync_extabt(uint32_t syndrome)
{
    /* Return true if this syndrome value is a synchronous external abort */
@@ -8847,8 +9135,8 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    vaddr addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    uint64_t new_mode = aarch64_pstate_mode(new_el, true);
    uint64_t old_mode;
    unsigned int cur_el = arm_current_el(env);
    int rt;

@@ -8891,8 +9179,13 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    } else {
        if (pstate_read(env) & PSTATE_SP) {
            addr += 0x200;
        }
        if (is_a64(env) && (env->cp15.gcscr_el[new_el] & GCSCR_EXLOCKEN)) {
            new_mode |= PSTATE_EXLOCK;
        }
    }

    switch (cs->exception_index) {

@@ -8996,7 +9289,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
             * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
             * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
             */
            old_mode = deposit32(old_mode, 2, 2, 2);
            old_mode = deposit64(old_mode, 2, 2, 2);
        }
    }
} else {

@@ -9009,7 +9302,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
    qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%" PRIx64 "\n", old_mode);
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

@@ -9063,7 +9356,8 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64
                  " PSTATE 0x%" PRIx64 "\n",
                  new_el, env->pc, pstate_read(env));
}

@@ -9119,7 +9413,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx64 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }
@@ -9309,21 +9603,34 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   bool el1_is_aa32)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    bool epd, hpd, tsz_oob, ds, ha, hd;
    bool epd, hpd, tsz_oob, ds, ha, hd, pie = false;
    int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
    ARMGranuleSize gran;
    ARMCPU *cpu = env_archcpu(env);
    bool stage2 = regime_is_stage2(mmu_idx);
    int r_el = regime_el(mmu_idx);

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        gran = tg0_to_gran_size(extract32(tcr, 14, 2));
        if (stage2) {
            /* VTCR_EL2 */
            hpd = false;
            /*
             * Stage2 does not have hierarchical permissions.
             * Thus disabling them makes things easier during ptw.
             */
            hpd = true;
            pie = extract64(tcr, 36, 1) && cpu_isar_feature(aa64_s2pie, cpu);
        } else {
            hpd = extract32(tcr, 24, 1);
            if (r_el == 3) {
                pie = (extract64(tcr, 35, 1)
                       && cpu_isar_feature(aa64_s1pie, cpu));
            } else {
                pie = ((env->cp15.tcr2_el[2] & TCR2_PIE)
                       && (!arm_feature(env, ARM_FEATURE_EL3)
                           || (env->cp15.scr_el3 & SCR_TCR2EN)));
            }
        }
        epd = false;
        sh = extract32(tcr, 12, 2);

@@ -9360,10 +9667,16 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
        ds = extract64(tcr, 59, 1);

        if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
            regime_is_user(env, mmu_idx)) {
            regime_is_user(mmu_idx)) {
            epd = true;
        }

        pie = ((env->cp15.tcr2_el[r_el] & TCR2_PIE)
               && (!arm_feature(env, ARM_FEATURE_EL3)
                   || (env->cp15.scr_el3 & SCR_TCR2EN))
               && (r_el == 2 || (arm_hcrx_el2_eff(env) & HCRX_TCR2EN)));
    }
    hpd |= pie;

    gran = sanitize_gran_size(cpu, gran, stage2);

@@ -9442,6 +9755,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
        .ha = ha,
        .hd = ha && hd,
        .gran = gran,
        .pie = pie,
    };
}
@@ -9556,33 +9870,6 @@ int fp_exception_el(CPUARMState *env, int cur_el)
    return 0;
}

/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
@@ -34,6 +34,7 @@
#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"
#include "mmuidx-internal.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0

@@ -250,6 +251,7 @@ FIELD(VSTCR, SA, 30, 1)
#define HCRX_MSCEN    (1ULL << 11)
#define HCRX_TCR2EN   (1ULL << 14)
#define HCRX_SCTLR2EN (1ULL << 15)
#define HCRX_GCSEN    (1ULL << 22)

#define HPFAR_NS      (1ULL << 63)

@@ -304,14 +306,14 @@ FIELD(CNTHCTL, CNTPMASK, 19, 1)
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);
                                uint64_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);
                                   uint64_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.

@@ -752,6 +754,7 @@ struct ARMMMUFaultInfo {
    bool s1ptw;
    bool s1ns;
    bool ea;
    bool dirtybit; /* FEAT_S1PIE, FEAT_S2PIE */
};

/**

@@ -983,8 +986,6 @@ static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
@@ -1027,108 +1028,10 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu)
    }
}

/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
    return env->cp15.sctlr_el[regime_el(mmu_idx)];
}

/*

@@ -1160,13 +1063,13 @@ static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
    return env->cp15.tcr_el[regime_el(mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    int el = regime_el(mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
|
|||
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* arm_mmu_idx_is_stage1_of_2:
|
||||
* @mmu_idx: The ARMMMUIdx to test
|
||||
*
|
||||
* Return true if @mmu_idx is a NOTLB mmu_idx that is the
|
||||
* first stage of a two stage regime.
|
||||
*/
|
||||
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
|
||||
{
|
||||
switch (mmu_idx) {
|
||||
case ARMMMUIdx_Stage1_E0:
|
||||
case ARMMMUIdx_Stage1_E1:
|
||||
case ARMMMUIdx_Stage1_E1_PAN:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
|
||||
const ARMISARegisters *id)
|
||||
{
|
||||
|
|
@@ -1491,7 +1375,7 @@ static inline int arm_granule_bits(ARMGranuleSize gran)

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 * translation controls for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz : 8;

@@ -1506,6 +1390,7 @@ typedef struct ARMVAParameters {
    bool ha : 1;
    bool hd : 1;
    ARMGranuleSize gran : 2;
    bool pie : 1;
} ARMVAParameters;

/**

@@ -1576,6 +1461,13 @@ typedef struct ARMCacheAttrs {
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
    /*
     * For ARMMMUIdx_Stage2*, the protection installed into f.prot
     * is the result for AccessType_TTW, i.e. the page table walk itself.
     * The protection installed into s2prot is the one to be merged
     * with the stage1 protection.
     */
    int s2prot;
} GetPhysAddrResult;

/**

@@ -1892,6 +1784,8 @@ void define_tlb_insn_regs(ARMCPU *cpu);
void define_at_insn_regs(ARMCPU *cpu);
/* Add the cpreg definitions for PM cpregs */
void define_pm_cpregs(ARMCPU *cpu);
/* Add the cpreg definitions for GCS cpregs */
void define_gcs_cpregs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)

@@ -2003,8 +1897,13 @@ void vfp_clear_float_status_exc_flags(CPUARMState *env);
 */
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);
bool arm_pan_enabled(CPUARMState *env);
uint32_t cpsr_read_for_spsr_elx(CPUARMState *env);
void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val);

/* Compare uint64_t for qsort and bsearch. */
int compare_u64(const void *a, const void *b);

/* Used in FEAT_MEC to set the MECIDWidthm1 field in the MECIDR_EL2 register. */
#define MECID_WIDTH 16

#endif
@@ -816,6 +816,80 @@ static const VMStateInfo vmstate_cpsr = {
    .put = put_cpsr,
};

static int get_pstate64(QEMUFile *f, void *opaque, size_t size,
                        const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint64_t val = qemu_get_be64(f);

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write_from_spsr_elx(env, val);
    }
    return 0;
}

static int put_pstate64(QEMUFile *f, void *opaque, size_t size,
                        const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint64_t val;

    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read_for_spsr_elx(env);
    }
    qemu_put_be64(f, val);
    return 0;
}

static bool pstate64_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint64_t val;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return false;
    }
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read_for_spsr_elx(env);
        if (val & PSTATE_SS) {
            return true;
        }
    }
    return val > UINT32_MAX;
}

static const VMStateDescription vmstate_pstate64 = {
    .name = "cpu/pstate64",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pstate64_needed,
    .fields = (const VMStateField[]) {
        {
            .name = "pstate64",
            .version_id = 0,
            .size = sizeof(uint64_t),
            .info = &(const VMStateInfo) {
                .name = "pstate64",
                .get = get_pstate64,
                .put = put_pstate64,
            },
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
};

static int get_power(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
@@ -848,6 +922,23 @@ static const VMStateInfo vmstate_powered_off = {
    .put = put_power,
};

static bool syndrome64_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    return cpu->env.exception.syndrome > UINT32_MAX;
}

static const VMStateDescription vmstate_syndrome64 = {
    .name = "cpu/syndrome64",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = syndrome64_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.exception.syndrome, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
};

static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

@@ -1035,6 +1126,12 @@ const VMStateDescription vmstate_arm_cpu = {
    VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
    VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
    VMSTATE_UINT64(env.pc, ARMCPU),
    /*
     * If any bits are set in the upper 32 bits of cpsr/pstate,
     * or if the cpu is in aa32 mode and PSTATE.SS is set, then
     * the cpu/pstate64 subsection will override this with the
     * full 64 bit state.
     */
    {
        .name = "cpsr",
        .version_id = 0,

@@ -1065,7 +1162,19 @@ const VMStateDescription vmstate_arm_cpu = {
    VMSTATE_UINT64(env.exclusive_val, ARMCPU),
    VMSTATE_UINT64(env.exclusive_high, ARMCPU),
    VMSTATE_UNUSED(sizeof(uint64_t)),
    VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
    /*
     * If any bits are set in the upper 32 bits of syndrome,
     * then the cpu/syndrome64 subsection will override this
     * with the full 64 bit state.
     */
    {
        .name = "env.exception.syndrome",
        .version_id = 0,
        .size = sizeof(uint32_t),
        .info = &vmstate_info_uint32,
        .flags = VMS_SINGLE,
        .offset = offsetoflow32(ARMCPU, env.exception.syndrome),
    },
    VMSTATE_UINT32(env.exception.fsr, ARMCPU),
    VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
    VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),

@@ -1098,6 +1207,8 @@ const VMStateDescription vmstate_arm_cpu = {
    &vmstate_serror,
    &vmstate_irq_line_state,
    &vmstate_wfxt_timer,
    &vmstate_syndrome64,
    &vmstate_pstate64,
    NULL
}
};
|||
|
|
@ -6,7 +6,12 @@ arm_ss.add(files(
|
|||
|
||||
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
|
||||
'cpu64.c',
|
||||
'gdbstub64.c'))
|
||||
'gdbstub64.c'
|
||||
))
|
||||
|
||||
arm_common_ss.add(files(
|
||||
'mmuidx.c',
|
||||
))
|
||||
|
||||
arm_system_ss = ss.source_set()
|
||||
arm_common_system_ss = ss.source_set()
|
||||
|
|
@ -22,6 +27,7 @@ arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files(
|
|||
'cpu32-stubs.c',
|
||||
))
|
||||
arm_user_ss.add(files(
|
||||
'cpregs-gcs.c',
|
||||
'cpregs-pmu.c',
|
||||
'debug_helper.c',
|
||||
'helper.c',
|
||||
|
|
@ -42,6 +48,7 @@ arm_common_system_ss.add(files(
|
|||
'arch_dump.c',
|
||||
'arm-powerctl.c',
|
||||
'cortex-regs.c',
|
||||
'cpregs-gcs.c',
|
||||
'cpregs-pmu.c',
|
||||
'cpu-irq.c',
|
||||
'debug_helper.c',
|
||||
|
|
|
|||
target/arm/mmuidx-internal.h (new file, 113 lines)
@@ -0,0 +1,113 @@
/*
 * QEMU Arm software mmu index internal definitions
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef TARGET_ARM_MMUIDX_INTERNAL_H
#define TARGET_ARM_MMUIDX_INTERNAL_H

#include "mmuidx.h"
#include "tcg/debug-assert.h"
#include "hw/registerfields.h"


FIELD(MMUIDXINFO, EL, 0, 2)
FIELD(MMUIDXINFO, ELVALID, 2, 1)
FIELD(MMUIDXINFO, REL, 3, 2)
FIELD(MMUIDXINFO, RELVALID, 5, 1)
FIELD(MMUIDXINFO, 2RANGES, 6, 1)
FIELD(MMUIDXINFO, PAN, 7, 1)
FIELD(MMUIDXINFO, USER, 8, 1)
FIELD(MMUIDXINFO, STAGE1, 9, 1)
FIELD(MMUIDXINFO, STAGE2, 10, 1)
FIELD(MMUIDXINFO, GCS, 11, 1)
FIELD(MMUIDXINFO, TG, 12, 5)

extern const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8];

#define arm_mmuidx_is_valid(x)  ((unsigned)(x) < ARRAY_SIZE(arm_mmuidx_table))

/* Return the exception level associated with this mmu index. */
static inline int arm_mmu_idx_to_el(ARMMMUIdx idx)
{
    tcg_debug_assert(arm_mmuidx_is_valid(idx));
    tcg_debug_assert(FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, ELVALID));
    return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, EL);
}

/*
 * Return the exception level for the address translation regime
 * associated with this mmu index.
 */
static inline uint32_t regime_el(ARMMMUIdx idx)
{
    tcg_debug_assert(arm_mmuidx_is_valid(idx));
    tcg_debug_assert(FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, RELVALID));
    return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, REL);
}

/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx idx)
{
    tcg_debug_assert(arm_mmuidx_is_valid(idx));
    return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, 2RANGES);
}

/* Return true if Privileged Access Never is enabled for this mmu index. */
static inline bool regime_is_pan(ARMMMUIdx idx)
{
    tcg_debug_assert(arm_mmuidx_is_valid(idx));
    return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, PAN);
}

/*
 * Return true if the exception level associated with this mmu index is 0.
 * Differs from arm_mmu_idx_to_el(idx) == 0 in that this allows querying
 * Stage1 and Stage2 mmu indexes.
 */
static inline bool regime_is_user(ARMMMUIdx idx)
{
    tcg_debug_assert(arm_mmuidx_is_valid(idx));
    return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, USER);
}

/* Return true if this mmu index is stage 1 of a 2-stage translation. */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx idx)
{
    tcg_debug_assert(arm_mmuidx_is_valid(idx));
    return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, STAGE1);
}

/* Return true if this mmu index is stage 2 of a 2-stage translation. */
static inline bool regime_is_stage2(ARMMMUIdx idx)
{
    tcg_debug_assert(arm_mmuidx_is_valid(idx));
    return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, STAGE2);
}

/* Return true if this mmu index implies AccessType_GCS. */
static inline bool regime_is_gcs(ARMMMUIdx idx)
{
    tcg_debug_assert(arm_mmuidx_is_valid(idx));
    return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, GCS);
}

/* Return the GCS MMUIdx for a given regime. */
static inline ARMMMUIdx regime_to_gcs(ARMMMUIdx idx)
{
    tcg_debug_assert(arm_mmuidx_is_valid(idx));
    uint32_t core = FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, TG);
    tcg_debug_assert(core != 0);  /* core 0 is E10_0, not a GCS index */
    return core | ARM_MMU_IDX_A;
}

#endif /* TARGET_ARM_MMUIDX_INTERNAL_H */
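
Editorial note, not part of the patch: the accessors above simply unpack the per-index word from arm_mmuidx_table. A minimal sketch (assuming the table entries defined in mmuidx.c below) of how one entry decodes:

    /* Illustrative only: ARMMMUIdx_E10_1_PAN packs EL(1) | REL(1) | R2 | TG(E10_1) | PAN. */
    uint32_t info = arm_mmuidx_table[ARMMMUIdx_E10_1_PAN];
    int el  = FIELD_EX32(info, MMUIDXINFO, EL);   /* 1: code runs at EL1 */
    int rel = FIELD_EX32(info, MMUIDXINFO, REL);  /* 1: EL1&0 translation regime */
    bool pan = regime_is_pan(ARMMMUIdx_E10_1_PAN);          /* true */
    bool two = regime_has_2_ranges(ARMMMUIdx_E10_1_PAN);    /* true: TTBR0 and TTBR1 */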
target/arm/mmuidx.c (new file, 66 lines)
@@ -0,0 +1,66 @@
/*
 * QEMU Arm software mmu index definitions
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "mmuidx-internal.h"


#define EL(X)  ((X << R_MMUIDXINFO_EL_SHIFT) | R_MMUIDXINFO_ELVALID_MASK | \
                ((X == 0) << R_MMUIDXINFO_USER_SHIFT))
#define REL(X) ((X << R_MMUIDXINFO_REL_SHIFT) | R_MMUIDXINFO_RELVALID_MASK)
#define R2     R_MMUIDXINFO_2RANGES_MASK
#define PAN    R_MMUIDXINFO_PAN_MASK
#define USER   R_MMUIDXINFO_USER_MASK
#define S1     R_MMUIDXINFO_STAGE1_MASK
#define S2     R_MMUIDXINFO_STAGE2_MASK
#define GCS    R_MMUIDXINFO_GCS_MASK
#define TG(X) \
    ((ARMMMUIdx_##X##_GCS & ARM_MMU_IDX_COREIDX_MASK) << R_MMUIDXINFO_TG_SHIFT)

const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8] = {
    /*
     * A-profile.
     */
    [ARMMMUIdx_E10_0]     = EL(0) | REL(1) | R2 | TG(E10_0),
    [ARMMMUIdx_E10_0_GCS] = EL(0) | REL(1) | R2 | GCS,
    [ARMMMUIdx_E10_1]     = EL(1) | REL(1) | R2 | TG(E10_1),
    [ARMMMUIdx_E10_1_PAN] = EL(1) | REL(1) | R2 | TG(E10_1) | PAN,
    [ARMMMUIdx_E10_1_GCS] = EL(1) | REL(1) | R2 | GCS,

    [ARMMMUIdx_E20_0]     = EL(0) | REL(2) | R2 | TG(E20_0),
    [ARMMMUIdx_E20_0_GCS] = EL(0) | REL(2) | R2 | GCS,
    [ARMMMUIdx_E20_2]     = EL(2) | REL(2) | R2 | TG(E20_2),
    [ARMMMUIdx_E20_2_PAN] = EL(2) | REL(2) | R2 | TG(E20_2) | PAN,
    [ARMMMUIdx_E20_2_GCS] = EL(2) | REL(2) | R2 | GCS,

    [ARMMMUIdx_E2]     = EL(2) | REL(2) | TG(E2),
    [ARMMMUIdx_E2_GCS] = EL(2) | REL(2) | GCS,

    [ARMMMUIdx_E3]        = EL(3) | REL(3) | TG(E3),
    [ARMMMUIdx_E3_GCS]    = EL(3) | REL(3) | GCS,
    [ARMMMUIdx_E30_0]     = EL(0) | REL(3),
    [ARMMMUIdx_E30_3_PAN] = EL(3) | REL(3) | PAN,

    [ARMMMUIdx_Stage2_S] = REL(2) | S2,
    [ARMMMUIdx_Stage2]   = REL(2) | S2,

    [ARMMMUIdx_Stage1_E0]     = REL(1) | R2 | S1 | USER | TG(Stage1_E0),
    [ARMMMUIdx_Stage1_E0_GCS] = REL(1) | R2 | S1 | USER | GCS,
    [ARMMMUIdx_Stage1_E1]     = REL(1) | R2 | S1 | TG(Stage1_E1),
    [ARMMMUIdx_Stage1_E1_PAN] = REL(1) | R2 | S1 | TG(Stage1_E1) | PAN,
    [ARMMMUIdx_Stage1_E1_GCS] = REL(1) | R2 | S1 | GCS,

    /*
     * M-profile.
     */
    [ARMMMUIdx_MUser]        = EL(0) | REL(1),
    [ARMMMUIdx_MPriv]        = EL(1) | REL(1),
    [ARMMMUIdx_MUserNegPri]  = EL(0) | REL(1),
    [ARMMMUIdx_MPrivNegPri]  = EL(1) | REL(1),
    [ARMMMUIdx_MSUser]       = EL(0) | REL(1),
    [ARMMMUIdx_MSPriv]       = EL(1) | REL(1),
    [ARMMMUIdx_MSUserNegPri] = EL(0) | REL(1),
    [ARMMMUIdx_MSPrivNegPri] = EL(1) | REL(1),
};
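
Editorial note, not part of the patch: the TG(X) field is what links a regime to its GCS variant, so regime_to_gcs() can recover the GCS mmu index without a switch statement. A minimal sketch under that assumption:

    /* Illustrative only: TG(E10_1) stores the core index of ARMMMUIdx_E10_1_GCS,
     * so regime_to_gcs() maps the normal index back to its GCS twin. */
    ARMMMUIdx gcs_idx = regime_to_gcs(ARMMMUIdx_E10_1);
    /* gcs_idx == ARMMMUIdx_E10_1_GCS; entries without TG (e.g. the GCS
     * indexes themselves) have a zero field and trip the debug assert. */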
target/arm/mmuidx.h (new file, 241 lines)
@@ -0,0 +1,241 @@
/*
 * QEMU Arm software mmu index definitions
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef TARGET_ARM_MMUIDX_H
#define TARGET_ARM_MMUIDX_H

/*
 * Arm has the following "translation regimes" (as the Arm ARM calls them):
 *
 * If EL3 is 64-bit:
 *  + NonSecure EL1 & 0 stage 1
 *  + NonSecure EL1 & 0 stage 2
 *  + NonSecure EL2
 *  + NonSecure EL2 & 0 (ARMv8.1-VHE)
 *  + Secure EL1 & 0 stage 1
 *  + Secure EL1 & 0 stage 2 (FEAT_SEL2)
 *  + Secure EL2 (FEAT_SEL2)
 *  + Secure EL2 & 0 (FEAT_SEL2)
 *  + Realm EL1 & 0 stage 1 (FEAT_RME)
 *  + Realm EL1 & 0 stage 2 (FEAT_RME)
 *  + Realm EL2 (FEAT_RME)
 *  + EL3
 * If EL3 is 32-bit:
 *  + NonSecure PL1 & 0 stage 1
 *  + NonSecure PL1 & 0 stage 2
 *  + NonSecure PL2
 *  + Secure PL1 & 0
 * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
 *
 * For QEMU, an mmu_idx is not quite the same as a translation regime because:
 *  1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
 *     because they may differ in access permissions even if the VA->PA map is
 *     the same
 *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
 *     translation, which means that we have one mmu_idx that deals with two
 *     concatenated translation regimes [this sort of combined s1+2 TLB is
 *     architecturally permitted]
 *  3. we don't need to allocate an mmu_idx to translations that we won't be
 *     handling via the TLB. The only way to do a stage 1 translation without
 *     the immediate stage 2 translation is via the ATS or AT system insns,
 *     which can be slow-pathed and always do a page table walk.
 *     The only use of stage 2 translations is either as part of an s1+2
 *     lookup or when loading the descriptors during a stage 1 page table walk,
 *     and in both those cases we don't use the TLB.
 *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
 *     translation regimes, because they map reasonably well to each other
 *     and they can't both be active at the same time.
 *  5. we want to be able to use the TLB for accesses done as part of a
 *     stage1 page table walk, rather than having to walk the stage2 page
 *     table over and over.
 *  6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
 *     Never (PAN) bit within PSTATE.
 *  7. we fold together most secure and non-secure regimes for A-profile,
 *     because there are no banked system registers for aarch64, so the
 *     process of switching between secure and non-secure is
 *     already heavyweight.
 *  8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
 *     because both are in use simultaneously for Secure EL2.
 *  9. we need separate indexes for handling AccessType_GCS.
 *
 * This gives us the following list of cases:
 *
 * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
 * EL0 EL1&0 stage 1+2 +GCS
 * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
 * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN)
 * EL1 EL1&0 stage 1+2 +GCS
 * EL0 EL2&0
 * EL0 EL2&0 +GCS
 * EL2 EL2&0
 * EL2 EL2&0 +PAN
 * EL2 EL2&0 +GCS
 * EL2 (aka NS PL2)
 * EL2 +GCS
 * EL3 (aka AArch32 S PL1 PL1&0)
 * EL3 +GCS
 * AArch32 S PL0 PL1&0 (we call this EL30_0)
 * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
 * Stage2 Secure
 * Stage2 NonSecure
 * plus one TLB per Physical address space: S, NS, Realm, Root
 *
 * for a total of 22 different mmu_idx.
 *
 * R profile CPUs have an MPU, but can use the same set of MMU indexes
 * as A profile. They only need to distinguish EL0 and EL1 (and
 * EL2 for cores like the Cortex-R52).
 *
 * M profile CPUs are rather different as they do not have a true MMU.
 * They have the following different MMU indexes:
 *  User
 *  Privileged
 *  User, execution priority negative (ie the MPU HFNMIENA bit may apply)
 *  Privileged, execution priority negative (ditto)
 * If the CPU supports the v8M Security Extension then there are also:
 *  Secure User
 *  Secure Privileged
 *  Secure User, execution priority negative
 *  Secure Privileged, execution priority negative
 *
 * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
 * are not quite the same -- different CPU types (most notably M profile
 * vs A/R profile) would like to use MMU indexes with different semantics,
 * but since we don't ever need to use all of those in a single CPU we
 * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
 * modes + total number of M profile MMU modes". The lower bits of
 * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
 * the same for any particular CPU.
 * Variables of type ARMMMUIdx are always full values, and the core
 * index values are in variables of type 'int'.
 *
 * Our enumeration includes at the end some entries which are not "true"
 * mmu_idx values in that they don't have corresponding TLBs and are only
 * valid for doing slow path page table walks.
 *
 * The constant names here are patterned after the general style of the names
 * of the AT/ATS operations.
 * The values used are carefully arranged to make mmu_idx => EL lookup easy.
 * For M profile we arrange them to have a bit for priv, a bit for negpri
 * and a bit for secure.
 */
#define ARM_MMU_IDX_A     0x20  /* A profile */
#define ARM_MMU_IDX_NOTLB 0x40  /* does not have a TLB */
#define ARM_MMU_IDX_M     0x80  /* M profile */

/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV   0x1
#define ARM_MMU_IDX_M_NEGPRI 0x2
#define ARM_MMU_IDX_M_S      0x4  /* Secure */

#define ARM_MMU_IDX_TYPE_MASK \
    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
#define ARM_MMU_IDX_COREIDX_MASK 0x1f

typedef enum ARMMMUIdx {
    /*
     * A-profile.
     */

    ARMMMUIdx_E10_0     = 0 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_0_GCS = 1 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1     = 2 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1_PAN = 3 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1_GCS = 4 | ARM_MMU_IDX_A,

    ARMMMUIdx_E20_0     = 5 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_0_GCS = 6 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2     = 7 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2_PAN = 8 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2_GCS = 9 | ARM_MMU_IDX_A,

    ARMMMUIdx_E2     = 10 | ARM_MMU_IDX_A,
    ARMMMUIdx_E2_GCS = 11 | ARM_MMU_IDX_A,

    ARMMMUIdx_E3        = 12 | ARM_MMU_IDX_A,
    ARMMMUIdx_E3_GCS    = 13 | ARM_MMU_IDX_A,
    ARMMMUIdx_E30_0     = 14 | ARM_MMU_IDX_A,
    ARMMMUIdx_E30_3_PAN = 15 | ARM_MMU_IDX_A,

    /*
     * Used for second stage of an S12 page table walk, or for descriptor
     * loads during first stage of an S1 page table walk.  Note that both
     * are in use simultaneously for SecureEL2: the security state for
     * the S2 ptw is selected by the NS bit from the S1 ptw.
     */
    ARMMMUIdx_Stage2_S = 16 | ARM_MMU_IDX_A,
    ARMMMUIdx_Stage2   = 17 | ARM_MMU_IDX_A,

    /* TLBs with 1-1 mapping to the physical address spaces. */
    ARMMMUIdx_Phys_S     = 18 | ARM_MMU_IDX_A,
    ARMMMUIdx_Phys_NS    = 19 | ARM_MMU_IDX_A,
    ARMMMUIdx_Phys_Root  = 20 | ARM_MMU_IDX_A,
    ARMMMUIdx_Phys_Realm = 21 | ARM_MMU_IDX_A,

    /*
     * These are not allocated TLBs and are used only for AT system
     * instructions or for the first stage of an S12 page table walk.
     */
    ARMMMUIdx_Stage1_E0     = 0 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1     = 1 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E0_GCS = 3 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1_GCS = 4 | ARM_MMU_IDX_NOTLB,

    /*
     * M-profile.
     */
    ARMMMUIdx_MUser = ARM_MMU_IDX_M,
    ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
    ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
    ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
    ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
} ARMMMUIdx;

/*
 * Bit macros for the core-mmu-index values for each index,
 * for use when calling tlb_flush_by_mmuidx() and friends.
 */
#define TO_CORE_BIT(NAME) \
    ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)

typedef enum ARMMMUIdxBit {
    TO_CORE_BIT(E10_0),
    TO_CORE_BIT(E10_0_GCS),
    TO_CORE_BIT(E10_1),
    TO_CORE_BIT(E10_1_PAN),
    TO_CORE_BIT(E10_1_GCS),
    TO_CORE_BIT(E20_0),
    TO_CORE_BIT(E20_0_GCS),
    TO_CORE_BIT(E20_2),
    TO_CORE_BIT(E20_2_PAN),
    TO_CORE_BIT(E20_2_GCS),
    TO_CORE_BIT(E2),
    TO_CORE_BIT(E2_GCS),
    TO_CORE_BIT(E3),
    TO_CORE_BIT(E3_GCS),
    TO_CORE_BIT(E30_0),
    TO_CORE_BIT(E30_3_PAN),
    TO_CORE_BIT(Stage2),
    TO_CORE_BIT(Stage2_S),

    TO_CORE_BIT(MUser),
    TO_CORE_BIT(MPriv),
    TO_CORE_BIT(MUserNegPri),
    TO_CORE_BIT(MPrivNegPri),
    TO_CORE_BIT(MSUser),
    TO_CORE_BIT(MSPriv),
    TO_CORE_BIT(MSUserNegPri),
    TO_CORE_BIT(MSPrivNegPri),
} ARMMMUIdxBit;

#undef TO_CORE_BIT

#define MMU_USER_IDX 0

#endif /* TARGET_ARM_MMUIDX_H */
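
Editorial note, not part of the patch: TO_CORE_BIT turns each A-profile index's low core-index bits into a single flush bit, which is what the scr_write and vmsa_tcr_ttbr_el2_write hunks above OR together. A minimal sketch of that relationship:

    /* Illustrative only: ARMMMUIdx_E2_GCS is 11 | ARM_MMU_IDX_A, so its
     * core index is 11 and ARMMMUIdxBit_E2_GCS is 1 << 11 (0x800). */
    tlb_flush_by_mmuidx(env_cpu(env),
                        ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS);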
target/arm/ptw.c (365 lines changed)
@@ -79,6 +79,8 @@ typedef struct S1Translate {
     * may be suppressed for debug or AT insns.
     */
    uint8_t in_prot_check;
+   /* Cached EffectiveHCR_EL2_NVx() bit */
+   bool in_nv1;
    bool out_rw;
    bool out_be;
    ARMSecuritySpace out_space;

@@ -167,6 +169,10 @@ ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
+   case ARMMMUIdx_E10_0_GCS:
+       return ARMMMUIdx_Stage1_E0_GCS;
+   case ARMMMUIdx_E10_1_GCS:
+       return ARMMMUIdx_Stage1_E1_GCS;
    default:
        return mmu_idx;
    }

@@ -233,9 +239,9 @@ static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
-       return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
+       return env->cp15.ttbr0_el[regime_el(mmu_idx)];
    } else {
-       return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
+       return env->cp15.ttbr1_el[regime_el(mmu_idx)];
    }
}

@@ -274,8 +280,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
+   case ARMMMUIdx_E10_0_GCS:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
+   case ARMMMUIdx_E10_1_GCS:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_TGE) {

@@ -284,8 +292,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
        break;

    case ARMMMUIdx_Stage1_E0:
+   case ARMMMUIdx_Stage1_E0_GCS:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
+   case ARMMMUIdx_Stage1_E1_GCS:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_DC) {

@@ -294,10 +304,14 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
        break;

    case ARMMMUIdx_E20_0:
+   case ARMMMUIdx_E20_0_GCS:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
+   case ARMMMUIdx_E20_2_GCS:
    case ARMMMUIdx_E2:
+   case ARMMMUIdx_E2_GCS:
    case ARMMMUIdx_E3:
+   case ARMMMUIdx_E3_GCS:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        break;

@@ -998,7 +1012,7 @@ static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
-                                regime_is_user(env, mmu_idx));
+                                regime_is_user(mmu_idx));
}

/*

@@ -1024,7 +1038,7 @@ static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)

static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
-   return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
+   return simple_ap_to_rw_prot_is_user(ap, regime_is_user(mmu_idx));
}

static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,

@@ -1057,7 +1071,7 @@ static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
-   if (regime_el(env, ptw->in_mmu_idx) == 1) {
+   if (regime_el(ptw->in_mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;

@@ -1196,7 +1210,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
-   if (regime_el(env, mmu_idx) == 1) {
+   if (regime_el(mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;

@@ -1314,7 +1328,7 @@ do_fault:
 * @xn: XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
-static int get_S2prot_noexecute(int s2ap)
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

@@ -1324,12 +1338,6 @@ static int get_S2prot_noexecute(int s2ap)
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
-   return prot;
-}
-
-static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
-{
-   int prot = get_S2prot_noexecute(s2ap);

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {

@@ -1361,6 +1369,44 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
    return prot;
}

+static int get_S2prot_indirect(CPUARMState *env, GetPhysAddrResult *result,
+                               int pi_index, int po_index, bool s1_is_el0)
+{
+    /* Last index is (priv, unpriv, ttw) */
+    static const uint8_t perm_table[16][3] = {
+        /* 0 */ { 0, 0, 0 },  /* no access */
+        /* 1 */ { 0, 0, 0 },  /* reserved */
+        /* 2 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+        /* 3 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+        /* 4 */ { PAGE_WRITE, PAGE_WRITE, 0 },
+        /* 5 */ { 0, 0, 0 },  /* reserved */
+        /* 6 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+        /* 7 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+        /* 8 */ { PAGE_READ, PAGE_READ, PAGE_READ },
+        /* 9 */ { PAGE_READ, PAGE_READ | PAGE_EXEC, PAGE_READ },
+        /* A */ { PAGE_READ | PAGE_EXEC, PAGE_READ, PAGE_READ },
+        /* B */ { PAGE_READ | PAGE_EXEC, PAGE_READ | PAGE_EXEC, PAGE_READ },
+        /* C */ { PAGE_READ | PAGE_WRITE,
+                  PAGE_READ | PAGE_WRITE,
+                  PAGE_READ | PAGE_WRITE },
+        /* D */ { PAGE_READ | PAGE_WRITE,
+                  PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+                  PAGE_READ | PAGE_WRITE },
+        /* E */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+                  PAGE_READ | PAGE_WRITE,
+                  PAGE_READ | PAGE_WRITE },
+        /* F */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+                  PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+                  PAGE_READ | PAGE_WRITE },
+    };
+
+    uint64_t pir = (env->cp15.scr_el3 & SCR_PIEN ? env->cp15.s2pir_el2 : 0);
+    int s2pi = extract64(pir, pi_index * 4, 4);
+
+    result->f.prot = perm_table[s2pi][2];
+    return perm_table[s2pi][s1_is_el0];
+}
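A short illustrative note on the stage-2 indirect lookup above (commentary, not part of the patch; the helper name below is hypothetical, extract64() is the existing QEMU bitops helper): each 4-bit field of S2PIR selects one row of perm_table, and the third column of that row is what get_S2prot_indirect() installs into result->f.prot for the translation-table-walk access itself.

    /* Illustrative only: pull the 4-bit permission field 'pi_index' out of an S2PIR value. */
    static inline int s2pir_field(uint64_t s2pir, int pi_index)
    {
        return extract64(s2pir, pi_index * 4, 4);   /* 16 fields of 4 bits each */
    }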

/*
 * Translate section/page access permissions to protection flags
 * @env: CPUARMState

@@ -1378,7 +1424,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
    ARMCPU *cpu = env_archcpu(env);
-   bool is_user = regime_is_user(env, mmu_idx);
+   bool is_user = regime_is_user(mmu_idx);
    bool have_wxn;
    int wxn = 0;

@@ -1395,10 +1441,10 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
     * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
     * do not affect EPAN.
     */
-   if (user_rw && regime_is_pan(env, mmu_idx)) {
+   if (user_rw && regime_is_pan(mmu_idx)) {
        prot_rw = 0;
    } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
-              regime_is_pan(env, mmu_idx) &&
+              regime_is_pan(mmu_idx) &&
               (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
        prot_rw = 0;
    }

@@ -1455,7 +1501,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
-       switch (regime_el(env, mmu_idx)) {
+       switch (regime_el(mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {

@@ -1482,11 +1528,115 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
    return prot_rw | PAGE_EXEC;
}

+/* Extra page permission bits, during get_S1prot_indirect only. */
+#define PAGE_GCS     (1 << 3)
+#define PAGE_WXN     (1 << 4)
+#define PAGE_OVERLAY (1 << 5)
+QEMU_BUILD_BUG_ON(PAGE_RWX & (PAGE_GCS | PAGE_WXN | PAGE_OVERLAY));
+
+static int get_S1prot_indirect(CPUARMState *env, S1Translate *ptw,
+                               ARMMMUIdx mmu_idx, int pi_index, int po_index,
+                               ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
+{
+    static const uint8_t perm_table[16] = {
+        /* 0 */ PAGE_OVERLAY,  /* no access */
+        /* 1 */ PAGE_OVERLAY | PAGE_READ,
+        /* 2 */ PAGE_OVERLAY | PAGE_EXEC,
+        /* 3 */ PAGE_OVERLAY | PAGE_READ | PAGE_EXEC,
+        /* 4 */ PAGE_OVERLAY,  /* reserved */
+        /* 5 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE,
+        /* 6 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_WXN,
+        /* 7 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+        /* 8 */ PAGE_READ,
+        /* 9 */ PAGE_READ | PAGE_GCS,
+        /* A */ PAGE_READ | PAGE_EXEC,
+        /* B */ 0,  /* reserved */
+        /* C */ PAGE_READ | PAGE_WRITE,
+        /* D */ 0,  /* reserved */
+        /* E */ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+        /* F */ 0,  /* reserved */
+    };
+
+    uint32_t el = regime_el(mmu_idx);
+    uint64_t pir = env->cp15.pir_el[el];
+    uint64_t pire0 = 0;
+    int perm;
+
+    if (el < 3) {
+        if (arm_feature(env, ARM_FEATURE_EL3)
+            && !(env->cp15.scr_el3 & SCR_PIEN)) {
+            pir = 0;
+        } else if (el == 2) {
+            pire0 = env->cp15.pire0_el2;
+        } else if (!ptw->in_nv1) {
+            pire0 = env->cp15.pir_el[0];
+        }
+    }
+    perm = perm_table[extract64(pir, pi_index * 4, 4)];
+
+    if (regime_has_2_ranges(mmu_idx)) {
+        int p_perm = perm;
+        int u_perm = perm_table[extract64(pire0, pi_index * 4, 4)];
+
+        if ((p_perm & (PAGE_EXEC | PAGE_GCS)) &&
+            (u_perm & (PAGE_WRITE | PAGE_GCS))) {
+            p_perm &= ~(PAGE_RWX | PAGE_GCS);
+            u_perm &= ~(PAGE_RWX | PAGE_GCS);
+        }
+        if ((u_perm & (PAGE_RWX | PAGE_GCS)) && regime_is_pan(mmu_idx)) {
+            p_perm &= ~(PAGE_READ | PAGE_WRITE);
+        }
+        perm = regime_is_user(mmu_idx) ? u_perm : p_perm;
+    }
+
+    if (in_pa != out_pa) {
+        switch (in_pa) {
+        case ARMSS_Root:
+            /*
+             * R_ZWRVD: permission fault for insn fetched from non-Root,
+             * I_WWBFB: SIF has no effect in EL3.
+             */
+            perm &= ~(PAGE_EXEC | PAGE_GCS);
+            break;
+        case ARMSS_Realm:
+            /*
+             * R_PKTDS: permission fault for insn fetched from non-Realm,
+             * for Realm EL2 or EL2&0.  The corresponding fault for EL1&0
+             * happens during any stage2 translation.
+             */
+            if (el == 2) {
+                perm &= ~(PAGE_EXEC | PAGE_GCS);
+            }
+            break;
+        case ARMSS_Secure:
+            if (env->cp15.scr_el3 & SCR_SIF) {
+                perm &= ~(PAGE_EXEC | PAGE_GCS);
+            }
+            break;
+        default:
+            /* Input NonSecure must have output NonSecure. */
+            g_assert_not_reached();
+        }
+    }
+
+    if (regime_is_gcs(mmu_idx)) {
+        /*
+         * Note that the one s1perms.gcs bit controls both read and write
+         * access via AccessType_GCS.  See AArch64.S1CheckPermissions.
+         */
+        perm = (perm & PAGE_GCS ? PAGE_READ | PAGE_WRITE : 0);
+    } else if (perm & PAGE_WXN) {
+        perm &= ~PAGE_EXEC;
+    }
+
+    return perm & PAGE_RWX;
+}
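As commentary on the GCS handling at the end of get_S1prot_indirect() (not part of the patch; the helper below is a hypothetical restatement of the last few lines): for a shadow-stack translation regime the single s1perms.gcs bit is what grants both GCS reads and GCS writes, so permission index 9 (PAGE_READ | PAGE_GCS) yields read+write for a GCS access but read-only for an ordinary access.

    /* Illustrative only: the GCS regime's view of an indirect permission value. */
    static inline int gcs_view_of_perm(int perm)
    {
        return (perm & PAGE_GCS) ? PAGE_READ | PAGE_WRITE : 0;
    }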

static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
-   uint32_t el = regime_el(env, mmu_idx);
+   uint32_t el = regime_el(mmu_idx);
    int select, tsz;
    bool epd, hpd;

@@ -1507,8 +1657,12 @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
-       hpd = false;
        epd = false;
+       /*
+        * Stage2 does not have hierarchical permissions.
+        * Thus disabling them makes things easier during ptw.
+        */
+       hpd = true;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
|
@ -1673,12 +1827,6 @@ static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
|
|||
}
|
||||
}
|
||||
|
||||
static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
|
||||
{
|
||||
uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
|
||||
return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
|
||||
}
|
||||
|
||||
/**
|
||||
* get_phys_addr_lpae: perform one stage of page table walk, LPAE format
|
||||
*
|
||||
|
|
@ -1713,8 +1861,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
|
|||
int32_t stride;
|
||||
int addrsize, inputsize, outputsize;
|
||||
uint64_t tcr = regime_tcr(env, mmu_idx);
|
||||
int ap, xn, pxn;
|
||||
uint32_t el = regime_el(env, mmu_idx);
|
||||
int ap, prot;
|
||||
uint32_t el = regime_el(mmu_idx);
|
||||
uint64_t descaddrmask;
|
||||
bool aarch64 = arm_el_is_aa64(env, el);
|
||||
uint64_t descriptor, new_descriptor;
|
||||
|
|
@ -1730,6 +1878,16 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
|
|||
!arm_el_is_aa64(env, 1));
|
||||
level = 0;
|
||||
|
||||
/*
|
||||
* Cache NV1 before we adjust ptw->in_space for NSTable.
|
||||
* Note that this is only relevant for EL1&0, and that
|
||||
* computing it would assert for ARMSS_Root.
|
||||
*/
|
||||
if (el == 1) {
|
||||
uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
|
||||
ptw->in_nv1 = (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
|
||||
}
|
||||
|
||||
/*
|
||||
* If TxSZ is programmed to a value larger than the maximum,
|
||||
* or smaller than the effective minimum, it is IMPLEMENTATION
|
||||
|
|
@ -2014,21 +2172,31 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
|
|||
* except NSTable (which we have already handled).
|
||||
*/
|
||||
attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
|
||||
if (!regime_is_stage2(mmu_idx)) {
|
||||
if (!param.hpd) {
|
||||
attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
|
||||
/*
|
||||
* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
|
||||
* means "force PL1 access only", which means forcing AP[1] to 0.
|
||||
*/
|
||||
attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
|
||||
attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */
|
||||
}
|
||||
if (!param.hpd) {
|
||||
attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
|
||||
/*
|
||||
* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
|
||||
* means "force PL1 access only", which means forcing AP[1] to 0.
|
||||
*/
|
||||
attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
|
||||
attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */
|
||||
}
|
||||
|
||||
ap = extract32(attrs, 6, 2);
|
||||
out_space = ptw->cur_space;
|
||||
if (regime_is_stage2(mmu_idx)) {
|
||||
if (param.pie) {
|
||||
int pi = extract64(attrs, 6, 1)
|
||||
| (extract64(attrs, 51, 1) << 1)
|
||||
| (extract64(attrs, 53, 2) << 2);
|
||||
int po = extract64(attrs, 60, 3);
|
||||
prot = get_S2prot_indirect(env, result, pi, po, ptw->in_s1_is_el0);
|
||||
} else {
|
||||
int xn = extract64(attrs, 53, 2);
|
||||
prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
|
||||
/* Install TTW permissions in f.prot. */
|
||||
result->f.prot = prot & (PAGE_READ | PAGE_WRITE);
|
||||
}
|
||||
/*
|
||||
* R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
|
||||
* The bit remains ignored for other security states.
|
||||
|
|
@ -2037,11 +2205,9 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
|
|||
*/
|
||||
if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
|
||||
out_space = ARMSS_NonSecure;
|
||||
result->f.prot = get_S2prot_noexecute(ap);
|
||||
} else {
|
||||
xn = extract64(attrs, 53, 2);
|
||||
result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
|
||||
prot &= ~PAGE_EXEC;
|
||||
}
|
||||
result->s2prot = prot;
|
||||
|
||||
result->cacheattrs.is_s2_format = true;
|
||||
result->cacheattrs.attrs = extract32(attrs, 2, 4);
|
||||
|
|
@ -2055,7 +2221,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
|
|||
int nse, ns = extract32(attrs, 5, 1);
|
||||
uint8_t attrindx;
|
||||
uint64_t mair;
|
||||
int user_rw, prot_rw;
|
||||
|
||||
switch (out_space) {
|
||||
case ARMSS_Root:
|
||||
|
|
@ -2104,33 +2269,57 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
|
|||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
xn = extract64(attrs, 54, 1);
|
||||
pxn = extract64(attrs, 53, 1);
|
||||
|
||||
if (el == 1 && nv_nv1_enabled(env, ptw)) {
|
||||
if (param.pie) {
|
||||
int pi = extract64(attrs, 6, 1)
|
||||
| (extract64(attrs, 51, 1) << 1)
|
||||
| (extract64(attrs, 53, 2) << 2);
|
||||
int po = extract64(attrs, 60, 3);
|
||||
/*
|
||||
* With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page
|
||||
* descriptor bit 54 holds PXN, 53 is RES0, and the effective value
|
||||
* of UXN is 0. Similarly for bits 59 and 60 in table descriptors
|
||||
* (which we have already folded into bits 53 and 54 of attrs).
|
||||
* AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
|
||||
* Similarly, APTable[0] from the table descriptor is treated as 0;
|
||||
* we already folded this into AP[1] and squashing that to 0 does
|
||||
* the right thing.
|
||||
* Note that we modified ptw->in_space earlier for NSTable, but
|
||||
* result->f.attrs retains a copy of the original security space.
|
||||
*/
|
||||
pxn = xn;
|
||||
xn = 0;
|
||||
ap &= ~1;
|
||||
}
|
||||
prot = get_S1prot_indirect(env, ptw, mmu_idx, pi, po,
|
||||
result->f.attrs.space, out_space);
|
||||
} else if (regime_is_gcs(mmu_idx)) {
|
||||
/*
|
||||
* While one must use indirect permissions to successfully
|
||||
* use GCS instructions, AArch64.S1DirectBasePermissions
|
||||
* faithfully supplies s1perms.gcs = 0, Just In Case.
|
||||
*/
|
||||
prot = 0;
|
||||
} else {
|
||||
int xn = extract64(attrs, 54, 1);
|
||||
int pxn = extract64(attrs, 53, 1);
|
||||
int user_rw, prot_rw;
|
||||
|
||||
user_rw = simple_ap_to_rw_prot_is_user(ap, true);
|
||||
prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
|
||||
result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
|
||||
xn, pxn, ptw->in_space, out_space);
|
||||
if (el == 1 && ptw->in_nv1) {
|
||||
/*
|
||||
* With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1},
|
||||
* the block/page descriptor bit 54 holds PXN,
|
||||
* 53 is RES0, and the effective value of UXN is 0.
|
||||
* Similarly for bits 59 and 60 in table descriptors
|
||||
* (which we have already folded into bits 53 and 54 of attrs).
|
||||
* AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
|
||||
* Similarly, APTable[0] from the table descriptor is treated
|
||||
* as 0; we already folded this into AP[1] and squashing
|
||||
* that to 0 does the right thing.
|
||||
*/
|
||||
pxn = xn;
|
||||
xn = 0;
|
||||
ap &= ~1;
|
||||
}
|
||||
|
||||
user_rw = simple_ap_to_rw_prot_is_user(ap, true);
|
||||
prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
|
||||
prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
|
||||
xn, pxn, ptw->in_space, out_space);
|
||||
}
|
||||
result->f.prot = prot;
|
||||
|
||||
/* Index into MAIR registers for cache attributes */
|
||||
attrindx = extract32(attrs, 2, 3);
|
||||
mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
|
||||
mair = env->cp15.mair_el[regime_el(mmu_idx)];
|
||||
assert(attrindx <= 7);
|
||||
result->cacheattrs.is_s2_format = false;
|
||||
result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
|
||||
|
|
@ -2172,11 +2361,27 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
|
|||
result->f.tlb_fill_flags = 0;
|
||||
}
|
||||
|
||||
if (ptw->in_prot_check & ~result->f.prot) {
|
||||
if (ptw->in_prot_check & ~prot) {
|
||||
fi->type = ARMFault_Permission;
|
||||
goto do_fault;
|
||||
}
|
||||
|
||||
/* S1PIE and S2PIE both have a bit for software dirty page tracking. */
|
||||
if (access_type == MMU_DATA_STORE && param.pie) {
|
||||
/*
|
||||
* For S1PIE, bit 7 is nDirty and both HA and HD are checked.
|
||||
* For S2PIE, bit 7 is Dirty and only HD is checked.
|
||||
*/
|
||||
bool bit7 = extract64(attrs, 7, 1);
|
||||
if (regime_is_stage2(mmu_idx)
|
||||
? !bit7 && !param.hd
|
||||
: bit7 && !(param.ha && param.hd)) {
|
||||
fi->type = ARMFault_Permission;
|
||||
fi->dirtybit = true;
|
||||
goto do_fault;
|
||||
}
|
||||
}
|
||||
|
||||
/* If FEAT_HAFDBS has made changes, update the PTE. */
|
||||
if (new_descriptor != descriptor) {
|
||||
new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
|
||||
|
|
@ -2239,7 +2444,7 @@ static bool get_phys_addr_pmsav5(CPUARMState *env,
|
|||
uint32_t mask;
|
||||
uint32_t base;
|
||||
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
|
||||
bool is_user = regime_is_user(env, mmu_idx);
|
||||
bool is_user = regime_is_user(mmu_idx);
|
||||
|
||||
if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
|
||||
/* MPU disabled. */
|
||||
|
|
@ -2406,7 +2611,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env,
|
|||
ARMCPU *cpu = env_archcpu(env);
|
||||
int n;
|
||||
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
|
||||
bool is_user = regime_is_user(env, mmu_idx);
|
||||
bool is_user = regime_is_user(mmu_idx);
|
||||
bool secure = arm_space_is_secure(ptw->in_space);
|
||||
|
||||
result->f.phys_addr = address;
|
||||
|
|
@ -2592,7 +2797,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env,
|
|||
static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
|
||||
uint32_t secure)
|
||||
{
|
||||
if (regime_el(env, mmu_idx) == 2) {
|
||||
if (regime_el(mmu_idx) == 2) {
|
||||
return env->pmsav8.hprbar;
|
||||
} else {
|
||||
return env->pmsav8.rbar[secure];
|
||||
|
|
@ -2602,7 +2807,7 @@ static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
|
|||
static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
|
||||
uint32_t secure)
|
||||
{
|
||||
if (regime_el(env, mmu_idx) == 2) {
|
||||
if (regime_el(mmu_idx) == 2) {
|
||||
return env->pmsav8.hprlar;
|
||||
} else {
|
||||
return env->pmsav8.rlar[secure];
|
||||
|
|
@ -2626,7 +2831,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
|
|||
* memory system to use a subpage.
|
||||
*/
|
||||
ARMCPU *cpu = env_archcpu(env);
|
||||
bool is_user = regime_is_user(env, mmu_idx);
|
||||
bool is_user = regime_is_user(mmu_idx);
|
||||
int n;
|
||||
int matchregion = -1;
|
||||
bool hit = false;
|
||||
|
|
@ -2634,7 +2839,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
|
|||
uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
|
||||
int region_counter;
|
||||
|
||||
if (regime_el(env, mmu_idx) == 2) {
|
||||
if (regime_el(mmu_idx) == 2) {
|
||||
region_counter = cpu->pmsav8r_hdregion;
|
||||
} else {
|
||||
region_counter = cpu->pmsav7_dregion;
|
||||
|
|
@ -2760,7 +2965,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
|
|||
xn = 1;
|
||||
}
|
||||
|
||||
if (regime_el(env, mmu_idx) == 2) {
|
||||
if (regime_el(mmu_idx) == 2) {
|
||||
result->f.prot = simple_ap_to_rw_prot_is_user(ap,
|
||||
mmu_idx != ARMMMUIdx_E2);
|
||||
} else {
|
||||
|
|
@ -2769,7 +2974,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
|
|||
|
||||
if (!arm_feature(env, ARM_FEATURE_M)) {
|
||||
uint8_t attrindx = extract32(matched_rlar, 1, 3);
|
||||
uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
|
||||
uint64_t mair = env->cp15.mair_el[regime_el(mmu_idx)];
|
||||
uint8_t sh = extract32(matched_rlar, 3, 2);
|
||||
|
||||
if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
|
||||
|
|
@ -2777,7 +2982,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
|
|||
xn = 0x1;
|
||||
}
|
||||
|
||||
if ((regime_el(env, mmu_idx) == 1) &&
|
||||
if ((regime_el(mmu_idx) == 1) &&
|
||||
regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
|
||||
pxn = 0x1;
|
||||
}
|
||||
|
|
@ -3262,7 +3467,7 @@ static bool get_phys_addr_disabled(CPUARMState *env,
|
|||
break;
|
||||
|
||||
default:
|
||||
r_el = regime_el(env, mmu_idx);
|
||||
r_el = regime_el(mmu_idx);
|
||||
if (arm_el_is_aa64(env, r_el)) {
|
||||
int pamax = arm_pamax(env_archcpu(env));
|
||||
uint64_t tcr = env->cp15.tcr_el[r_el];
|
||||
|
|
@ -3370,7 +3575,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
|
|||
fi->s2addr = ipa;
|
||||
|
||||
/* Combine the S1 and S2 perms. */
|
||||
result->f.prot &= s1_prot;
|
||||
result->f.prot = s1_prot & result->s2prot;
|
||||
|
||||
/* If S2 fails, return early. */
|
||||
if (ret) {
|
||||
|
|
@ -3507,7 +3712,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
|
|||
break;
|
||||
}
|
||||
|
||||
result->f.attrs.user = regime_is_user(env, mmu_idx);
|
||||
result->f.attrs.user = regime_is_user(mmu_idx);
|
||||
|
||||
/*
|
||||
* Fast Context Switch Extension. This doesn't exist at all in v8.
|
||||
|
|
@ -3515,7 +3720,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
|
|||
*/
|
||||
if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
|
||||
&& !arm_feature(env, ARM_FEATURE_V8)) {
|
||||
if (regime_el(env, mmu_idx) == 3) {
|
||||
if (regime_el(mmu_idx) == 3) {
|
||||
address += env->cp15.fcseidr_s;
|
||||
} else {
|
||||
address += env->cp15.fcseidr_ns;
|
||||
|
|
@ -3617,15 +3822,22 @@ arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
|
|||
|
||||
switch (mmu_idx) {
|
||||
case ARMMMUIdx_E10_0:
|
||||
case ARMMMUIdx_E10_0_GCS:
|
||||
case ARMMMUIdx_E10_1:
|
||||
case ARMMMUIdx_E10_1_PAN:
|
||||
case ARMMMUIdx_E10_1_GCS:
|
||||
case ARMMMUIdx_E20_0:
|
||||
case ARMMMUIdx_E20_0_GCS:
|
||||
case ARMMMUIdx_E20_2:
|
||||
case ARMMMUIdx_E20_2_PAN:
|
||||
case ARMMMUIdx_E20_2_GCS:
|
||||
case ARMMMUIdx_Stage1_E0:
|
||||
case ARMMMUIdx_Stage1_E0_GCS:
|
||||
case ARMMMUIdx_Stage1_E1:
|
||||
case ARMMMUIdx_Stage1_E1_PAN:
|
||||
case ARMMMUIdx_Stage1_E1_GCS:
|
||||
case ARMMMUIdx_E2:
|
||||
case ARMMMUIdx_E2_GCS:
|
||||
ss = arm_security_space_below_el3(env);
|
||||
break;
|
||||
case ARMMMUIdx_Stage2:
|
||||
|
|
@ -3654,6 +3866,7 @@ arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
|
|||
ss = ARMSS_Secure;
|
||||
break;
|
||||
case ARMMMUIdx_E3:
|
||||
case ARMMMUIdx_E3_GCS:
|
||||
case ARMMMUIdx_E30_0:
|
||||
case ARMMMUIdx_E30_3_PAN:
|
||||
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
|
||||
|
|
|
|||
|
|

@@ -63,6 +63,7 @@ enum arm_exception_class {
    EC_MOP = 0x27,
    EC_AA32_FPTRAP = 0x28,
    EC_AA64_FPTRAP = 0x2c,
+   EC_GCS = 0x2d,
    EC_SERROR = 0x2f,
    EC_BREAKPOINT = 0x30,
    EC_BREAKPOINT_SAME_EL = 0x31,

@@ -83,6 +84,23 @@ typedef enum {
    SME_ET_InaccessibleZT0,
} SMEExceptionType;

+typedef enum {
+    GCS_ET_DataCheck,
+    GCS_ET_EXLOCK,
+    GCS_ET_GCSSTR_GCSSTTR,
+} GCSExceptionType;
+
+typedef enum {
+    GCS_IT_RET_nPauth = 0,
+    GCS_IT_GCSPOPM = 1,
+    GCS_IT_RET_PauthA = 2,
+    GCS_IT_RET_PauthB = 3,
+    GCS_IT_GCSSS1 = 4,
+    GCS_IT_GCSSS2 = 5,
+    GCS_IT_GCSPOPCX = 8,
+    GCS_IT_GCSPOPX = 9,
+} GCSInstructionType;
+
#define ARM_EL_EC_LENGTH 6
#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25

@@ -351,6 +369,23 @@ static inline uint32_t syn_pcalignment(void)
    return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

+static inline uint32_t syn_gcs_data_check(GCSInstructionType it, int rn)
+{
+    return ((EC_GCS << ARM_EL_EC_SHIFT) | ARM_EL_IL |
+            (GCS_ET_DataCheck << 20) | (rn << 5) | it);
+}
+
+static inline uint32_t syn_gcs_exlock(void)
+{
+    return (EC_GCS << ARM_EL_EC_SHIFT) | ARM_EL_IL | (GCS_ET_EXLOCK << 20);
+}
+
+static inline uint32_t syn_gcs_gcsstr(int ra, int rn)
+{
+    return ((EC_GCS << ARM_EL_EC_SHIFT) | ARM_EL_IL |
+            (GCS_ET_GCSSTR_GCSSTTR << 20) | (ra << 10) | (rn << 5));
+}
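For concreteness, a worked example of the encoding above (commentary only, derived from the constants in this hunk, with ARM_EL_IL taken as (1 << 25)): a GCS data-check fault reported for GCSPOPM with Rn = 5 encodes as

    /*
     * syn_gcs_data_check(GCS_IT_GCSPOPM, 5)
     *   = (0x2d << 26) | (1 << 25) | (GCS_ET_DataCheck << 20) | (5 << 5) | 1
     *   = 0xb60000a1
     */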
|
||||
|
||||
static inline uint32_t syn_serror(uint32_t extra)
|
||||
{
|
||||
return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra;
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
|
|||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
|
||||
void raise_exception_ra(CPUARMState *env, uint32_t excp, uint64_t syndrome,
|
||||
uint32_t target_el, uintptr_t ra)
|
||||
{
|
||||
g_assert_not_reached();
|
||||
|
|
|
|||
|
|
@ -248,6 +248,7 @@ ERETA 1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB
|
|||
AUTIA1716 1101 0101 0000 0011 0010 0001 100 11111
|
||||
AUTIB1716 1101 0101 0000 0011 0010 0001 110 11111
|
||||
ESB 1101 0101 0000 0011 0010 0010 000 11111
|
||||
GCSB 1101 0101 0000 0011 0010 0010 011 11111
|
||||
PACIAZ 1101 0101 0000 0011 0010 0011 000 11111
|
||||
PACIASP 1101 0101 0000 0011 0010 0011 001 11111
|
||||
PACIBZ 1101 0101 0000 0011 0010 0011 010 11111
|
||||
|
|
@ -256,6 +257,7 @@ ERETA 1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB
|
|||
AUTIASP 1101 0101 0000 0011 0010 0011 101 11111
|
||||
AUTIBZ 1101 0101 0000 0011 0010 0011 110 11111
|
||||
AUTIBSP 1101 0101 0000 0011 0010 0011 111 11111
|
||||
CHKFEAT 1101 0101 0000 0011 0010 0101 000 11111
|
||||
]
|
||||
# The canonical NOP has CRm == op2 == 0, but all of the space
|
||||
# that isn't specifically allocated to an instruction must NOP
|
||||
|
|
@ -570,6 +572,9 @@ LDAPR_i 10 011001 10 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext
|
|||
LDAPR_i 00 011001 11 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=1 sz=0
|
||||
LDAPR_i 01 011001 11 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=1 sz=1
|
||||
|
||||
# GCSSTR, GCSSTTR
|
||||
GCSSTR 11011001 000 11111 000 unpriv:1 11 rn:5 rt:5
|
||||
|
||||
# Load/store multiple structures
|
||||
# The 4-bit opcode in [15:12] encodes repeat count and structure elements
|
||||
&ldst_mult rm rn rt sz q p rpt selem
|
||||
|
|
|
|||
|
|
@ -1280,6 +1280,7 @@ void aarch64_max_tcg_initfn(Object *obj)
|
|||
t = FIELD_DP64(t, ID_AA64PFR1, SME, 2); /* FEAT_SME2 */
|
||||
t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_3 */
|
||||
t = FIELD_DP64(t, ID_AA64PFR1, NMI, 1); /* FEAT_NMI */
|
||||
t = FIELD_DP64(t, ID_AA64PFR1, GCS, 1); /* FEAT_GCS */
|
||||
SET_IDREG(isar, ID_AA64PFR1, t);
|
||||
|
||||
t = GET_IDREG(isar, ID_AA64MMFR0);
|
||||
|
|
@ -1326,7 +1327,10 @@ void aarch64_max_tcg_initfn(Object *obj)
|
|||
t = GET_IDREG(isar, ID_AA64MMFR3);
|
||||
t = FIELD_DP64(t, ID_AA64MMFR3, TCRX, 1); /* FEAT_TCR2 */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR3, SCTLRX, 1); /* FEAT_SCTLR2 */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR3, MEC, 1); /* FEAT_MEC */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR3, S1PIE, 1); /* FEAT_S1PIE */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR3, S2PIE, 1); /* FEAT_S2PIE */
|
||||
SET_IDREG(isar, ID_AA64MMFR3, t);
|
||||
|
||||
t = GET_IDREG(isar, ID_AA64ZFR0);
|
||||
|
|
|
|||
|
|
@ -576,6 +576,7 @@ uint32_t HELPER(advsimd_rinth)(uint32_t x, float_status *fp_status)
|
|||
return ret;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
static int el_from_spsr(uint32_t spsr)
|
||||
{
|
||||
/* Return the exception level that this SPSR is requesting a return to,
|
||||
|
|
@ -614,32 +615,12 @@ static int el_from_spsr(uint32_t spsr)
|
|||
}
|
||||
}
|
||||
|
||||
static void cpsr_write_from_spsr_elx(CPUARMState *env,
|
||||
uint32_t val)
|
||||
{
|
||||
uint32_t mask;
|
||||
|
||||
/* Save SPSR_ELx.SS into PSTATE. */
|
||||
env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
|
||||
val &= ~PSTATE_SS;
|
||||
|
||||
/* Move DIT to the correct location for CPSR */
|
||||
if (val & PSTATE_DIT) {
|
||||
val &= ~PSTATE_DIT;
|
||||
val |= CPSR_DIT;
|
||||
}
|
||||
|
||||
mask = aarch32_cpsr_valid_mask(env->features, \
|
||||
&env_archcpu(env)->isar);
|
||||
cpsr_write(env, val, mask, CPSRWriteRaw);
|
||||
}
|
||||
|
||||
void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
|
||||
{
|
||||
ARMCPU *cpu = env_archcpu(env);
|
||||
int cur_el = arm_current_el(env);
|
||||
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
|
||||
uint32_t spsr = env->banked_spsr[spsr_idx];
|
||||
uint64_t spsr = env->banked_spsr[spsr_idx];
|
||||
int new_el;
|
||||
bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
|
||||
|
||||
|
|
@@ -694,6 +675,17 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
        goto illegal_return;
    }

+   /*
+    * If GetCurrentEXLOCKEN, the exception return path must use GCSPOPCX,
+    * which will set PSTATE.EXLOCK.  We need not explicitly check FEAT_GCS,
+    * because GCSCR_ELx cannot be set without it.
+    */
+   if (new_el == cur_el &&
+       (env->cp15.gcscr_el[cur_el] & GCSCR_EXLOCKEN) &&
+       !(env->pstate & PSTATE_EXLOCK)) {
+       goto illegal_return;
+   }
+
    bql_lock();
    arm_call_pre_el_change_hook(cpu);
    bql_unlock();
|
||||
|
|
@ -787,6 +779,7 @@ illegal_return:
|
|||
qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
|
||||
"resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
|
||||
}
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -80,7 +80,6 @@ DEF_HELPER_3(vfp_ah_maxh, f16, f16, f16, fpst)
|
|||
DEF_HELPER_3(vfp_ah_maxs, f32, f32, f32, fpst)
|
||||
DEF_HELPER_3(vfp_ah_maxd, f64, f64, f64, fpst)
|
||||
|
||||
DEF_HELPER_2(exception_return, void, env, i64)
|
||||
DEF_HELPER_FLAGS_2(dc_zva, TCG_CALL_NO_WG, void, env, i64)
|
||||
|
||||
DEF_HELPER_FLAGS_3(pacia, TCG_CALL_NO_WG, i64, env, i64, i64)
|
||||
|
|
@ -145,3 +144,7 @@ DEF_HELPER_FLAGS_5(gvec_fmulx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32
|
|||
DEF_HELPER_FLAGS_5(gvec_fmulx_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
|
||||
DEF_HELPER_FLAGS_5(gvec_fmulx_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
|
||||
DEF_HELPER_FLAGS_5(gvec_fmulx_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
DEF_HELPER_2(exception_return, void, env, i64)
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -451,6 +451,44 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
|
|||
DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
|
||||
}
|
||||
|
||||
if (cpu_isar_feature(aa64_gcs, env_archcpu(env))) {
|
||||
/* C.f. GCSEnabled */
|
||||
if (env->cp15.gcscr_el[el] & GCSCR_PCRSEL) {
|
||||
switch (el) {
|
||||
default:
|
||||
if (!el_is_in_host(env, el)
|
||||
&& !(arm_hcrx_el2_eff(env) & HCRX_GCSEN)) {
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case 2:
|
||||
if (arm_feature(env, ARM_FEATURE_EL3)
|
||||
&& !(env->cp15.scr_el3 & SCR_GCSEN)) {
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case 3:
|
||||
DP_TBFLAG_A64(flags, GCS_EN, 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* C.f. GCSReturnValueCheckEnabled */
|
||||
if (env->cp15.gcscr_el[el] & GCSCR_RVCHKEN) {
|
||||
DP_TBFLAG_A64(flags, GCS_RVCEN, 1);
|
||||
}
|
||||
|
||||
/* C.f. CheckGCSSTREnabled */
|
||||
if (!(env->cp15.gcscr_el[el] & GCSCR_STREN)) {
|
||||
DP_TBFLAG_A64(flags, GCSSTR_EL, el ? el : 1);
|
||||
} else if (el == 1
|
||||
&& EX_TBFLAG_ANY(flags, FGT_ACTIVE)
|
||||
&& !FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR],
|
||||
HFGITR_EL2, NGCSSTR_EL1)) {
|
||||
DP_TBFLAG_A64(flags, GCSSTR_EL, 2);
|
||||
}
|
||||
}
|
||||
|
||||
if (env->vfp.fpcr & FPCR_AH) {
|
||||
DP_TBFLAG_A64(flags, AH, 1);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -605,7 +605,7 @@ void mte_check_fail(CPUARMState *env, uint32_t desc,
|
|||
int el, reg_el, tcf;
|
||||
uint64_t sctlr;
|
||||
|
||||
reg_el = regime_el(env, arm_mmu_idx);
|
||||
reg_el = regime_el(arm_mmu_idx);
|
||||
sctlr = env->cp15.sctlr_el[reg_el];
|
||||
|
||||
switch (arm_mmu_idx) {
|
||||
|
|
|
|||
|
|
@ -46,7 +46,7 @@ int exception_target_el(CPUARMState *env)
|
|||
}
|
||||
|
||||
void raise_exception(CPUARMState *env, uint32_t excp,
|
||||
uint32_t syndrome, uint32_t target_el)
|
||||
uint64_t syndrome, uint32_t target_el)
|
||||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
|
||||
|
|
@ -70,7 +70,7 @@ void raise_exception(CPUARMState *env, uint32_t excp,
|
|||
cpu_loop_exit(cs);
|
||||
}
|
||||
|
||||
void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
|
||||
void raise_exception_ra(CPUARMState *env, uint32_t excp, uint64_t syndrome,
|
||||
uint32_t target_el, uintptr_t ra)
|
||||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
|
|
@ -881,6 +881,13 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
|
|||
}
|
||||
syndrome = syn_uncategorized();
|
||||
break;
|
||||
case CP_ACCESS_EXLOCK:
|
||||
/*
|
||||
* CP_ACCESS_EXLOCK is always directed to the current EL,
|
||||
* which is going to be the same as the usual target EL.
|
||||
*/
|
||||
syndrome = syn_gcs_exlock();
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -149,7 +149,8 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
CPUState *cs = env_cpu(env);
|
||||
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
|
||||
|
||||
tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
|
||||
tlb_flush_page_by_mmuidx(cs, pageaddr,
|
||||
ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS);
|
||||
}
|
||||
|
||||
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
|
|
@ -159,7 +160,8 @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
|
||||
|
||||
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
|
||||
ARMMMUIdxBit_E2);
|
||||
ARMMMUIdxBit_E2 |
|
||||
ARMMMUIdxBit_E2_GCS);
|
||||
}
|
||||
|
||||
static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
|
|
@ -202,7 +204,7 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
|
||||
tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
|
||||
tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS);
|
||||
}
|
||||
|
||||
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
|
|
@ -210,7 +212,8 @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
|
||||
tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
|
||||
tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2 |
|
||||
ARMMMUIdxBit_E2_GCS);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -228,12 +231,16 @@ static int vae1_tlbmask(CPUARMState *env)
|
|||
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
|
||||
mask = ARMMMUIdxBit_E20_2 |
|
||||
ARMMMUIdxBit_E20_2_PAN |
|
||||
ARMMMUIdxBit_E20_0;
|
||||
ARMMMUIdxBit_E20_2_GCS |
|
||||
ARMMMUIdxBit_E20_0 |
|
||||
ARMMMUIdxBit_E20_0_GCS;
|
||||
} else {
|
||||
/* This is AArch64 only, so we don't need to touch the EL30_x TLBs */
|
||||
mask = ARMMMUIdxBit_E10_1 |
|
||||
ARMMMUIdxBit_E10_1_PAN |
|
||||
ARMMMUIdxBit_E10_0;
|
||||
ARMMMUIdxBit_E10_1_GCS |
|
||||
ARMMMUIdxBit_E10_0 |
|
||||
ARMMMUIdxBit_E10_0_GCS;
|
||||
}
|
||||
return mask;
|
||||
}
|
||||
|
|
@ -246,13 +253,20 @@ static int vae2_tlbmask(CPUARMState *env)
|
|||
if (hcr & HCR_E2H) {
|
||||
mask = ARMMMUIdxBit_E20_2 |
|
||||
ARMMMUIdxBit_E20_2_PAN |
|
||||
ARMMMUIdxBit_E20_0;
|
||||
ARMMMUIdxBit_E20_2_GCS |
|
||||
ARMMMUIdxBit_E20_0 |
|
||||
ARMMMUIdxBit_E20_0_GCS;
|
||||
} else {
|
||||
mask = ARMMMUIdxBit_E2;
|
||||
mask = ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS;
|
||||
}
|
||||
return mask;
|
||||
}
|
||||
|
||||
static int vae3_tlbmask(void)
|
||||
{
|
||||
return ARMMMUIdxBit_E3 | ARMMMUIdxBit_E3_GCS;
|
||||
}
|
||||
|
||||
/* Return 56 if TBI is enabled, 64 otherwise. */
|
||||
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
|
||||
uint64_t addr)
|
||||
|
|
@ -325,9 +339,12 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
static int e2_tlbmask(CPUARMState *env)
|
||||
{
|
||||
return (ARMMMUIdxBit_E20_0 |
|
||||
ARMMMUIdxBit_E20_0_GCS |
|
||||
ARMMMUIdxBit_E20_2 |
|
||||
ARMMMUIdxBit_E20_2_PAN |
|
||||
ARMMMUIdxBit_E2);
|
||||
ARMMMUIdxBit_E20_2_GCS |
|
||||
ARMMMUIdxBit_E2 |
|
||||
ARMMMUIdxBit_E2_GCS);
|
||||
}
|
||||
|
||||
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
|
|
@ -354,7 +371,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
ARMCPU *cpu = env_archcpu(env);
|
||||
CPUState *cs = CPU(cpu);
|
||||
|
||||
tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
|
||||
tlb_flush_by_mmuidx(cs, vae3_tlbmask());
|
||||
}
|
||||
|
||||
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
|
|
@ -380,7 +397,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
{
|
||||
CPUState *cs = env_cpu(env);
|
||||
|
||||
tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
|
||||
tlb_flush_by_mmuidx_all_cpus_synced(cs, vae3_tlbmask());
|
||||
}
|
||||
|
||||
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
|
|
@ -411,7 +428,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
CPUState *cs = CPU(cpu);
|
||||
uint64_t pageaddr = sextract64(value << 12, 0, 56);
|
||||
|
||||
tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
|
||||
tlb_flush_page_by_mmuidx(cs, pageaddr, vae3_tlbmask());
|
||||
}
|
||||
|
||||
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
|
|
@ -465,7 +482,7 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
|
||||
|
||||
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
|
||||
ARMMMUIdxBit_E3, bits);
|
||||
vae3_tlbmask(), bits);
|
||||
}
|
||||
|
||||
static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
|
||||
|
|
@ -963,7 +980,7 @@ static void tlbi_aa64_rvae3_write(CPUARMState *env,
|
|||
* flush-last-level-only.
|
||||
*/
|
||||
|
||||
do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
|
||||
do_rvae_write(env, value, vae3_tlbmask(), tlb_force_broadcast(env));
|
||||
}
|
||||
|
||||
static void tlbi_aa64_rvae3is_write(CPUARMState *env,
|
||||
|
|
@ -977,7 +994,7 @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env,
|
|||
* flush-last-level-only or inner/outer specific flushes.
|
||||
*/
|
||||
|
||||
do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
|
||||
do_rvae_write(env, value, vae3_tlbmask(), true);
|
||||
}
|
||||
|
||||
static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
|
|
|
|||
|
|
@ -24,13 +24,13 @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
|
|||
return regime_using_lpae_format(env, mmu_idx);
|
||||
}
|
||||
|
||||
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
|
||||
static inline uint64_t merge_syn_data_abort(uint32_t template_syn,
|
||||
ARMMMUFaultInfo *fi,
|
||||
unsigned int target_el,
|
||||
bool same_el, bool is_write,
|
||||
int fsc)
|
||||
int fsc, bool gcs)
|
||||
{
|
||||
uint32_t syn;
|
||||
uint64_t syn;
|
||||
|
||||
/*
|
||||
* ISV is only set for stage-2 data aborts routed to EL2 and
|
||||
|
|
@ -75,6 +75,11 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
|
|||
/* Merge the runtime syndrome with the template syndrome. */
|
||||
syn |= template_syn;
|
||||
}
|
||||
|
||||
/* Form ISS2 at the top of the syndrome. */
|
||||
syn |= (uint64_t)fi->dirtybit << 37;
|
||||
syn |= (uint64_t)gcs << 40;
|
||||
|
||||
return syn;
|
||||
}
|
||||
|
||||
|
|
@ -176,7 +181,9 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
|
|||
int target_el = exception_target_el(env);
|
||||
int current_el = arm_current_el(env);
|
||||
bool same_el;
|
||||
uint32_t syn, exc, fsr, fsc;
|
||||
uint32_t exc, fsr, fsc;
|
||||
uint64_t syn;
|
||||
|
||||
/*
|
||||
* We know this must be a data or insn abort, and that
|
||||
* env->exception.syndrome contains the template syndrome set
|
||||
|
|
@ -246,9 +253,10 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
|
|||
syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
|
||||
exc = EXCP_PREFETCH_ABORT;
|
||||
} else {
|
||||
bool gcs = regime_is_gcs(core_to_arm_mmu_idx(env, mmu_idx));
|
||||
syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
|
||||
same_el, access_type == MMU_DATA_STORE,
|
||||
fsc);
|
||||
fsc, gcs);
|
||||
if (access_type == MMU_DATA_STORE
|
||||
&& arm_feature(env, ARM_FEATURE_V6)) {
|
||||
fsr |= (1 << 11);
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@
|
|||
#include "cpregs.h"
|
||||
|
||||
static TCGv_i64 cpu_X[32];
|
||||
static TCGv_i64 cpu_gcspr[4];
|
||||
static TCGv_i64 cpu_pc;
|
||||
|
||||
/* Load/store exclusive handling */
|
||||
|
|
@ -77,6 +78,10 @@ static int scale_by_log2_tag_granule(DisasContext *s, int x)
|
|||
/* initialize TCG globals. */
|
||||
void a64_translate_init(void)
|
||||
{
|
||||
static const char gcspr_names[4][12] = {
|
||||
"gcspr_el0", "gcspr_el1", "gcspr_el2", "gcspr_el3"
|
||||
};
|
||||
|
||||
int i;
|
||||
|
||||
cpu_pc = tcg_global_mem_new_i64(tcg_env,
|
||||
|
|
@ -90,10 +95,17 @@ void a64_translate_init(void)
|
|||
|
||||
cpu_exclusive_high = tcg_global_mem_new_i64(tcg_env,
|
||||
offsetof(CPUARMState, exclusive_high), "exclusive_high");
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
cpu_gcspr[i] =
|
||||
tcg_global_mem_new_i64(tcg_env,
|
||||
offsetof(CPUARMState, cp15.gcspr_el[i]),
|
||||
gcspr_names[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the core mmu_idx to use for A64 load/store insns which
|
||||
* Return the full arm mmu_idx to use for A64 load/store insns which
|
||||
* have a "unprivileged load/store" variant. Those insns access
|
||||
* EL0 if executed from an EL which has control over EL0 (usually
|
||||
* EL1) but behave like normal loads and stores if executed from
|
||||
|
|
@ -103,7 +115,7 @@ void a64_translate_init(void)
|
|||
* normal encoding (in which case we will return the same
|
||||
* thing as get_mem_index().
|
||||
*/
|
||||
static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
|
||||
static ARMMMUIdx full_a64_user_mem_index(DisasContext *s, bool unpriv)
|
||||
{
|
||||
/*
|
||||
* If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
|
||||
|
|
@ -130,7 +142,19 @@ static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
|
|||
g_assert_not_reached();
|
||||
}
|
||||
}
|
||||
return arm_to_core_mmu_idx(useridx);
|
||||
return useridx;
|
||||
}
|
||||
|
||||
/* Return the core mmu_idx per above. */
|
||||
static int core_a64_user_mem_index(DisasContext *s, bool unpriv)
|
||||
{
|
||||
return arm_to_core_mmu_idx(full_a64_user_mem_index(s, unpriv));
|
||||
}
|
||||
|
||||
/* For a given translation regime, return the core mmu_idx for gcs access. */
|
||||
static int core_gcs_mem_index(ARMMMUIdx armidx)
|
||||
{
|
||||
return arm_to_core_mmu_idx(regime_to_gcs(armidx));
|
||||
}
|
||||
|
||||
static void set_btype_raw(int val)
|
||||
|
|
@ -408,6 +432,39 @@ static MemOp check_ordered_align(DisasContext *s, int rn, int imm,
|
|||
return finalize_memop(s, mop);
|
||||
}
|
||||
|
||||
static void gen_add_gcs_record(DisasContext *s, TCGv_i64 value)
|
||||
{
|
||||
TCGv_i64 addr = tcg_temp_new_i64();
|
||||
TCGv_i64 gcspr = cpu_gcspr[s->current_el];
|
||||
int mmuidx = core_gcs_mem_index(s->mmu_idx);
|
||||
MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
|
||||
|
||||
tcg_gen_addi_i64(addr, gcspr, -8);
|
||||
tcg_gen_qemu_st_i64(value, clean_data_tbi(s, addr), mmuidx, mop);
|
||||
tcg_gen_mov_i64(gcspr, addr);
|
||||
}
|
||||
|
||||
static void gen_load_check_gcs_record(DisasContext *s, TCGv_i64 target,
|
||||
GCSInstructionType it, int rt)
|
||||
{
|
||||
TCGv_i64 gcspr = cpu_gcspr[s->current_el];
|
||||
int mmuidx = core_gcs_mem_index(s->mmu_idx);
|
||||
MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
|
||||
TCGv_i64 rec_va = tcg_temp_new_i64();
|
||||
|
||||
tcg_gen_qemu_ld_i64(rec_va, clean_data_tbi(s, gcspr), mmuidx, mop);
|
||||
|
||||
if (s->gcs_rvcen) {
|
||||
TCGLabel *fail_label =
|
||||
delay_exception(s, EXCP_UDEF, syn_gcs_data_check(it, rt));
|
||||
|
||||
tcg_gen_brcond_i64(TCG_COND_NE, rec_va, target, fail_label);
|
||||
}
|
||||
|
||||
gen_a64_set_pc(s, rec_va);
|
||||
tcg_gen_addi_i64(gcspr, gcspr, 8);
|
||||
}
|
||||
|
||||
typedef struct DisasCompare64 {
|
||||
TCGCond cond;
|
||||
TCGv_i64 value;
|
||||
|
|
@ -1642,7 +1699,14 @@ static bool trans_B(DisasContext *s, arg_i *a)
|
|||
|
||||
static bool trans_BL(DisasContext *s, arg_i *a)
|
||||
{
|
||||
gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
|
||||
TCGv_i64 link = tcg_temp_new_i64();
|
||||
|
||||
gen_pc_plus_diff(s, link, 4);
|
||||
if (s->gcs_en) {
|
||||
gen_add_gcs_record(s, link);
|
||||
}
|
||||
tcg_gen_mov_i64(cpu_reg(s, 30), link);
|
||||
|
||||
reset_btype(s);
|
||||
gen_goto_tb(s, 0, a->imm);
|
||||
return true;
|
||||
|
|
@ -1739,15 +1803,15 @@ static bool trans_BR(DisasContext *s, arg_r *a)
|
|||
|
||||
static bool trans_BLR(DisasContext *s, arg_r *a)
|
||||
{
|
||||
TCGv_i64 dst = cpu_reg(s, a->rn);
|
||||
TCGv_i64 lr = cpu_reg(s, 30);
|
||||
if (dst == lr) {
|
||||
TCGv_i64 tmp = tcg_temp_new_i64();
|
||||
tcg_gen_mov_i64(tmp, dst);
|
||||
dst = tmp;
|
||||
TCGv_i64 link = tcg_temp_new_i64();
|
||||
|
||||
gen_pc_plus_diff(s, link, 4);
|
||||
if (s->gcs_en) {
|
||||
gen_add_gcs_record(s, link);
|
||||
}
|
||||
gen_pc_plus_diff(s, lr, curr_insn_len(s));
|
||||
gen_a64_set_pc(s, dst);
|
||||
gen_a64_set_pc(s, cpu_reg(s, a->rn));
|
||||
tcg_gen_mov_i64(cpu_reg(s, 30), link);
|
||||
|
||||
set_btype_for_blr(s);
|
||||
s->base.is_jmp = DISAS_JUMP;
|
||||
return true;
|
||||
|
|
@ -1755,7 +1819,13 @@ static bool trans_BLR(DisasContext *s, arg_r *a)
|
|||
|
||||
static bool trans_RET(DisasContext *s, arg_r *a)
|
||||
{
|
||||
gen_a64_set_pc(s, cpu_reg(s, a->rn));
|
||||
TCGv_i64 target = cpu_reg(s, a->rn);
|
||||
|
||||
if (s->gcs_en) {
|
||||
gen_load_check_gcs_record(s, target, GCS_IT_RET_nPauth, a->rn);
|
||||
} else {
|
||||
gen_a64_set_pc(s, target);
|
||||
}
|
||||
s->base.is_jmp = DISAS_JUMP;
|
||||
return true;
|
||||
}
|
||||
|
|
@ -1799,21 +1869,21 @@ static bool trans_BRAZ(DisasContext *s, arg_braz *a)
|
|||
|
||||
static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
|
||||
{
|
||||
TCGv_i64 dst, lr;
|
||||
TCGv_i64 dst, link;
|
||||
|
||||
if (!dc_isar_feature(aa64_pauth, s)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
|
||||
lr = cpu_reg(s, 30);
|
||||
if (dst == lr) {
|
||||
TCGv_i64 tmp = tcg_temp_new_i64();
|
||||
tcg_gen_mov_i64(tmp, dst);
|
||||
dst = tmp;
|
||||
|
||||
link = tcg_temp_new_i64();
|
||||
gen_pc_plus_diff(s, link, 4);
|
||||
if (s->gcs_en) {
|
||||
gen_add_gcs_record(s, link);
|
||||
}
|
||||
gen_pc_plus_diff(s, lr, curr_insn_len(s));
|
||||
gen_a64_set_pc(s, dst);
|
||||
tcg_gen_mov_i64(cpu_reg(s, 30), link);
|
||||
|
||||
set_btype_for_blr(s);
|
||||
s->base.is_jmp = DISAS_JUMP;
|
||||
return true;
|
||||
|
|
@ -1828,7 +1898,12 @@ static bool trans_RETA(DisasContext *s, arg_reta *a)
|
|||
}
|
||||
|
||||
dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
|
||||
gen_a64_set_pc(s, dst);
|
||||
if (s->gcs_en) {
|
||||
GCSInstructionType it = a->m ? GCS_IT_RET_PauthB : GCS_IT_RET_PauthA;
|
||||
gen_load_check_gcs_record(s, dst, it, 30);
|
||||
} else {
|
||||
gen_a64_set_pc(s, dst);
|
||||
}
|
||||
s->base.is_jmp = DISAS_JUMP;
|
||||
return true;
|
||||
}
|
||||
|
|
@ -1849,20 +1924,21 @@ static bool trans_BRA(DisasContext *s, arg_bra *a)
|
|||
|
||||
static bool trans_BLRA(DisasContext *s, arg_bra *a)
|
||||
{
|
||||
TCGv_i64 dst, lr;
|
||||
TCGv_i64 dst, link;
|
||||
|
||||
if (!dc_isar_feature(aa64_pauth, s)) {
|
||||
return false;
|
||||
}
|
||||
dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
|
||||
lr = cpu_reg(s, 30);
|
||||
if (dst == lr) {
|
||||
TCGv_i64 tmp = tcg_temp_new_i64();
|
||||
tcg_gen_mov_i64(tmp, dst);
|
||||
dst = tmp;
|
||||
|
||||
link = tcg_temp_new_i64();
|
||||
gen_pc_plus_diff(s, link, 4);
|
||||
if (s->gcs_en) {
|
||||
gen_add_gcs_record(s, link);
|
||||
}
|
||||
gen_pc_plus_diff(s, lr, curr_insn_len(s));
|
||||
gen_a64_set_pc(s, dst);
|
||||
tcg_gen_mov_i64(cpu_reg(s, 30), link);
|
||||
|
||||
set_btype_for_blr(s);
|
||||
s->base.is_jmp = DISAS_JUMP;
|
||||
return true;
|
||||
|
|
@ -1870,6 +1946,9 @@ static bool trans_BLRA(DisasContext *s, arg_bra *a)
|
|||
|
||||
static bool trans_ERET(DisasContext *s, arg_ERET *a)
|
||||
{
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
return false;
|
||||
#else
|
||||
TCGv_i64 dst;
|
||||
|
||||
if (s->current_el == 0) {
|
||||
|
|
@ -1889,10 +1968,14 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a)
|
|||
/* Must exit loop to check un-masked IRQs */
|
||||
s->base.is_jmp = DISAS_EXIT;
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool trans_ERETA(DisasContext *s, arg_reta *a)
|
||||
{
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
return false;
|
||||
#else
|
||||
TCGv_i64 dst;
|
||||
|
||||
if (!dc_isar_feature(aa64_pauth, s)) {
|
||||
|
|
@ -1918,6 +2001,7 @@ static bool trans_ERETA(DisasContext *s, arg_reta *a)
|
|||
/* Must exit loop to check un-masked IRQs */
|
||||
s->base.is_jmp = DISAS_EXIT;
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool trans_NOP(DisasContext *s, arg_NOP *a)
|
||||
|
|
@ -2060,6 +2144,14 @@ static bool trans_ESB(DisasContext *s, arg_ESB *a)
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool trans_GCSB(DisasContext *s, arg_GCSB *a)
|
||||
{
|
||||
if (dc_isar_feature(aa64_gcs, s)) {
|
||||
tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
|
||||
{
|
||||
if (s->pauth_active) {
|
||||
|
|
@ -2124,6 +2216,20 @@ static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool trans_CHKFEAT(DisasContext *s, arg_CHKFEAT *a)
|
||||
{
|
||||
uint64_t feat_en = 0;
|
||||
|
||||
if (s->gcs_en) {
|
||||
feat_en |= 1 << 0;
|
||||
}
|
||||
if (feat_en) {
|
||||
TCGv_i64 x16 = cpu_reg(s, 16);
|
||||
tcg_gen_andi_i64(x16, x16, ~feat_en);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
|
||||
{
|
||||
tcg_gen_movi_i64(cpu_exclusive_addr, -1);
|
||||
|
|
@ -2455,6 +2561,182 @@ static void gen_sysreg_undef(DisasContext *s, bool isread,
|
|||
    gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
}

static void gen_gcspopm(DisasContext *s, int rt)
{
    TCGv_i64 gcspr = cpu_gcspr[s->current_el];
    int mmuidx = core_gcs_mem_index(s->mmu_idx);
    MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
    TCGv_i64 value = tcg_temp_new_i64();
    TCGLabel *fail_label =
        delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSPOPM, rt));

    /* The value at top-of-stack must have low 2 bits clear. */
    tcg_gen_qemu_ld_i64(value, clean_data_tbi(s, gcspr), mmuidx, mop);
    tcg_gen_brcondi_i64(TCG_COND_TSTNE, value, 3, fail_label);

    /* Complete the pop and return the value. */
    tcg_gen_addi_i64(gcspr, gcspr, 8);
    tcg_gen_mov_i64(cpu_reg(s, rt), value);
}

static void gen_gcspushx(DisasContext *s)
{
    TCGv_i64 gcspr = cpu_gcspr[s->current_el];
    int spsr_idx = aarch64_banked_spsr_index(s->current_el);
    int spsr_off = offsetof(CPUARMState, banked_spsr[spsr_idx]);
    int elr_off = offsetof(CPUARMState, elr_el[s->current_el]);
    int mmuidx = core_gcs_mem_index(s->mmu_idx);
    MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
    TCGv_i64 addr = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_addi_i64(addr, gcspr, -8);
    tcg_gen_qemu_st_i64(cpu_reg(s, 30), addr, mmuidx, mop);

    tcg_gen_ld_i64(tmp, tcg_env, spsr_off);
    tcg_gen_addi_i64(addr, addr, -8);
    tcg_gen_qemu_st_i64(tmp, addr, mmuidx, mop);

    tcg_gen_ld_i64(tmp, tcg_env, elr_off);
    tcg_gen_addi_i64(addr, addr, -8);
    tcg_gen_qemu_st_i64(tmp, addr, mmuidx, mop);

    tcg_gen_addi_i64(addr, addr, -8);
    tcg_gen_qemu_st_i64(tcg_constant_i64(0b1001), addr, mmuidx, mop);

    tcg_gen_mov_i64(gcspr, addr);
    clear_pstate_bits(PSTATE_EXLOCK);
}

static void gen_gcspopcx(DisasContext *s)
{
    TCGv_i64 gcspr = cpu_gcspr[s->current_el];
    int spsr_idx = aarch64_banked_spsr_index(s->current_el);
    int spsr_off = offsetof(CPUARMState, banked_spsr[spsr_idx]);
    int elr_off = offsetof(CPUARMState, elr_el[s->current_el]);
    int gcscr_off = offsetof(CPUARMState, cp15.gcscr_el[s->current_el]);
    int pstate_off = offsetof(CPUARMState, pstate);
    int mmuidx = core_gcs_mem_index(s->mmu_idx);
    MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
    TCGv_i64 addr = tcg_temp_new_i64();
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();
    TCGLabel *fail_label =
        delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSPOPCX, 31));

    /* The value at top-of-stack must be an exception token. */
    tcg_gen_qemu_ld_i64(tmp1, gcspr, mmuidx, mop);
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp1, 0b1001, fail_label);

    /* Validate in turn, ELR ... */
    tcg_gen_addi_i64(addr, gcspr, 8);
    tcg_gen_qemu_ld_i64(tmp1, addr, mmuidx, mop);
    tcg_gen_ld_i64(tmp2, tcg_env, elr_off);
    tcg_gen_brcond_i64(TCG_COND_NE, tmp1, tmp2, fail_label);

    /* ... SPSR ... */
    tcg_gen_addi_i64(addr, addr, 8);
    tcg_gen_qemu_ld_i64(tmp1, addr, mmuidx, mop);
    tcg_gen_ld_i64(tmp2, tcg_env, spsr_off);
    tcg_gen_brcond_i64(TCG_COND_NE, tmp1, tmp2, fail_label);

    /* ... and LR. */
    tcg_gen_addi_i64(addr, addr, 8);
    tcg_gen_qemu_ld_i64(tmp1, addr, mmuidx, mop);
    tcg_gen_brcond_i64(TCG_COND_NE, tmp1, cpu_reg(s, 30), fail_label);

    /* Writeback stack pointer after pop. */
    tcg_gen_addi_i64(gcspr, addr, 8);

    /* PSTATE.EXLOCK = GetCurrentEXLOCKEN(). */
    tcg_gen_ld_i64(tmp1, tcg_env, gcscr_off);
    tcg_gen_ld_i64(tmp2, tcg_env, pstate_off);
    tcg_gen_shri_i64(tmp1, tmp1, ctz64(GCSCR_EXLOCKEN));
    tcg_gen_deposit_i64(tmp2, tmp2, tmp1, ctz64(PSTATE_EXLOCK), 1);
    tcg_gen_st_i64(tmp2, tcg_env, pstate_off);
}
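
gen_gcspushx() above stores a four-doubleword exception return record below the current GCSPR, and gen_gcspopcx() checks the same four entries before popping them and restoring PSTATE.EXLOCK. Seen from the guest, the record laid out in memory is, as a descriptive sketch only (the code above works on raw offsets, not on a structure):

/* GCS exception return record, lowest address first, as written by
 * GCSPUSHX and validated by GCSPOPCX.  GCSPR points at 'token' after
 * the push. */
struct gcs_exception_record {
    uint64_t token;     /* 0b1001, the exception return token */
    uint64_t elr;       /* ELR_ELx saved at the push */
    uint64_t spsr;      /* SPSR_ELx saved at the push */
    uint64_t lr;        /* X30 saved at the push */
};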

static void gen_gcspopx(DisasContext *s)
{
    TCGv_i64 gcspr = cpu_gcspr[s->current_el];
    int mmuidx = core_gcs_mem_index(s->mmu_idx);
    MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
    TCGv_i64 addr = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGLabel *fail_label =
        delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSPOPX, 31));

    /* The value at top-of-stack must be an exception token. */
    tcg_gen_qemu_ld_i64(tmp, gcspr, mmuidx, mop);
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0b1001, fail_label);

    /*
     * The other three values in the exception return record
     * are ignored, but are loaded anyway to raise faults.
     */
    tcg_gen_addi_i64(addr, gcspr, 8);
    tcg_gen_qemu_ld_i64(tmp, addr, mmuidx, mop);
    tcg_gen_addi_i64(addr, addr, 8);
    tcg_gen_qemu_ld_i64(tmp, addr, mmuidx, mop);
    tcg_gen_addi_i64(addr, addr, 8);
    tcg_gen_qemu_ld_i64(tmp, addr, mmuidx, mop);
    tcg_gen_addi_i64(gcspr, addr, 8);
}

static void gen_gcsss1(DisasContext *s, int rt)
{
    TCGv_i64 gcspr = cpu_gcspr[s->current_el];
    int mmuidx = core_gcs_mem_index(s->mmu_idx);
    MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
    TCGv_i64 inptr = cpu_reg(s, rt);
    TCGv_i64 cmp = tcg_temp_new_i64();
    TCGv_i64 new = tcg_temp_new_i64();
    TCGv_i64 old = tcg_temp_new_i64();
    TCGLabel *fail_label =
        delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSSS1, rt));

    /* Compute the valid cap entry that the new stack must have. */
    tcg_gen_deposit_i64(cmp, inptr, tcg_constant_i64(1), 0, 12);
    /* Compute the in-progress cap entry for the old stack. */
    tcg_gen_deposit_i64(new, gcspr, tcg_constant_i64(5), 0, 3);

    /* Swap the valid cap with the in-progress cap. */
    tcg_gen_atomic_cmpxchg_i64(old, inptr, cmp, new, mmuidx, mop);
    tcg_gen_brcond_i64(TCG_COND_NE, old, cmp, fail_label);

    /* The new stack had a valid cap: change gcspr. */
    tcg_gen_andi_i64(gcspr, inptr, ~7);
}

static void gen_gcsss2(DisasContext *s, int rt)
{
    TCGv_i64 gcspr = cpu_gcspr[s->current_el];
    int mmuidx = core_gcs_mem_index(s->mmu_idx);
    MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
    TCGv_i64 outptr = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGLabel *fail_label =
        delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSSS2, rt));

    /* Validate that the new stack has an in-progress cap. */
    tcg_gen_qemu_ld_i64(outptr, gcspr, mmuidx, mop);
    tcg_gen_andi_i64(tmp, outptr, 7);
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 5, fail_label);

    /* Push a valid cap to the old stack. */
    tcg_gen_andi_i64(outptr, outptr, ~7);
    tcg_gen_addi_i64(outptr, outptr, -8);
    tcg_gen_deposit_i64(tmp, outptr, tcg_constant_i64(1), 0, 12);
    tcg_gen_qemu_st_i64(tmp, outptr, mmuidx, mop);
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    /* Pop the in-progress cap from the new stack. */
    tcg_gen_addi_i64(gcspr, gcspr, 8);

    /* Return a pointer to the old stack cap. */
    tcg_gen_mov_i64(cpu_reg(s, rt), outptr);
}

/*
 * Look up @key, returning the cpreg, which must exist.
 * Additionally, the new cpreg must also be accessible.
@@ -2761,6 +3043,51 @@ static void handle_sys(DisasContext *s, bool isread,
            }
        }
        return;
    case ARM_CP_GCSPUSHM:
        if (s->gcs_en) {
            gen_add_gcs_record(s, cpu_reg(s, rt));
        }
        return;
    case ARM_CP_GCSPOPM:
        /* Note that X[rt] is unchanged if !GCSEnabled. */
        if (s->gcs_en) {
            gen_gcspopm(s, rt);
        }
        return;
    case ARM_CP_GCSPUSHX:
        /* Choose the CONSTRAINED UNPREDICTABLE for UNDEF. */
        if (rt != 31) {
            unallocated_encoding(s);
        } else if (s->gcs_en) {
            gen_gcspushx(s);
        }
        return;
    case ARM_CP_GCSPOPCX:
        /* Choose the CONSTRAINED UNPREDICTABLE for UNDEF. */
        if (rt != 31) {
            unallocated_encoding(s);
        } else if (s->gcs_en) {
            gen_gcspopcx(s);
        }
        return;
    case ARM_CP_GCSPOPX:
        /* Choose the CONSTRAINED UNPREDICTABLE for UNDEF. */
        if (rt != 31) {
            unallocated_encoding(s);
        } else if (s->gcs_en) {
            gen_gcspopx(s);
        }
        return;
    case ARM_CP_GCSSS1:
        if (s->gcs_en) {
            gen_gcsss1(s, rt);
        }
        return;
    case ARM_CP_GCSSS2:
        if (s->gcs_en) {
            gen_gcsss2(s, rt);
        }
        return;
    default:
        g_assert_not_reached();
    }

@@ -3555,7 +3882,7 @@ static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a,
     if (!a->p) {
         tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
     }
-    memidx = get_a64_user_mem_index(s, a->unpriv);
+    memidx = core_a64_user_mem_index(s, a->unpriv);
     *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
                                         a->w || a->rn != 31,
                                         mop, a->unpriv, memidx);

@@ -3576,7 +3903,7 @@ static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a)
 {
     bool iss_sf, iss_valid = !a->w;
     TCGv_i64 clean_addr, dirty_addr, tcg_rt;
-    int memidx = get_a64_user_mem_index(s, a->unpriv);
+    int memidx = core_a64_user_mem_index(s, a->unpriv);
     MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

     op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);

@@ -3594,7 +3921,7 @@ static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a)
 {
     bool iss_sf, iss_valid = !a->w;
     TCGv_i64 clean_addr, dirty_addr, tcg_rt;
-    int memidx = get_a64_user_mem_index(s, a->unpriv);
+    int memidx = core_a64_user_mem_index(s, a->unpriv);
     MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

     op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);

@@ -3961,6 +4288,42 @@ static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a)
    return true;
}

static bool trans_GCSSTR(DisasContext *s, arg_GCSSTR *a)
{
    ARMMMUIdx armidx;

    if (!dc_isar_feature(aa64_gcs, s)) {
        return false;
    }

    /*
     * The pseudocode for GCSSTTR is
     *
     * effective_el = AArch64.IsUnprivAccessPriv() ? PSTATE.EL : EL0;
     * if (effective_el == PSTATE.EL) CheckGCSSTREnabled();
     *
     * We have cached the result of IsUnprivAccessPriv in DisasContext,
     * but since we need the result of full_a64_user_mem_index anyway,
     * use the mmu_idx test as a proxy for the effective_el test.
     */
    armidx = full_a64_user_mem_index(s, a->unpriv);
    if (armidx == s->mmu_idx && s->gcsstr_el != 0) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_gcs_gcsstr(a->rn, a->rt),
                              s->gcsstr_el);
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_gen_qemu_st_i64(cpu_reg(s, a->rt),
                        clean_data_tbi(s, cpu_reg_sp(s, a->rn)),
                        core_gcs_mem_index(armidx),
                        finalize_memop(s, MO_64 | MO_ALIGN));
    return true;
}

static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a)
{
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;

@@ -4492,7 +4855,7 @@ static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
         return false;
     }

-    memidx = get_a64_user_mem_index(s, a->unpriv);
+    memidx = core_a64_user_mem_index(s, a->unpriv);

     /*
      * We pass option_a == true, matching our implementation;

@@ -4546,8 +4909,8 @@ static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn)
         return false;
     }

-    rmemidx = get_a64_user_mem_index(s, runpriv);
-    wmemidx = get_a64_user_mem_index(s, wunpriv);
+    rmemidx = core_a64_user_mem_index(s, runpriv);
+    wmemidx = core_a64_user_mem_index(s, wunpriv);

     /*
      * We pass option_a == true, matching our implementation;

@@ -10344,6 +10707,9 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
    dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE);
    dc->fpcr_ah = EX_TBFLAG_A64(tb_flags, AH);
    dc->fpcr_nep = EX_TBFLAG_A64(tb_flags, NEP);
    dc->gcs_en = EX_TBFLAG_A64(tb_flags, GCS_EN);
    dc->gcs_rvcen = EX_TBFLAG_A64(tb_flags, GCS_RVCEN);
    dc->gcsstr_el = EX_TBFLAG_A64(tb_flags, GCSSTR_EL);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;

@@ -10570,6 +10936,8 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
            gen_goto_tb(dc, 1, curr_insn_len(dc));
            break;
        }
    }

    emit_delayed_exceptions(dc);
}

const TranslatorOps aarch64_translator_ops = {

@@ -1088,6 +1088,57 @@ void gen_exception_insn(DisasContext *s, target_long pc_diff,
    s->base.is_jmp = DISAS_NORETURN;
}

TCGLabel *delay_exception_el(DisasContext *s, int excp,
                             uint32_t syn, uint32_t target_el)
{
    /* Use tcg_malloc for automatic release on longjmp out of translation. */
    DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));

    memset(e, 0, sizeof(*e));

    /* Save enough of the current state to satisfy gen_exception_insn. */
    e->pc_curr = s->pc_curr;
    e->pc_save = s->pc_save;
    if (!s->aarch64) {
        e->condexec_cond = s->condexec_cond;
        e->condexec_mask = s->condexec_mask;
    }

    e->excp = excp;
    e->syn = syn;
    e->target_el = target_el;

    e->next = s->delay_excp_list;
    s->delay_excp_list = e;

    e->lab = gen_new_label();
    return e->lab;
}

TCGLabel *delay_exception(DisasContext *s, int excp, uint32_t syn)
{
    return delay_exception_el(s, excp, syn, 0);
}

void emit_delayed_exceptions(DisasContext *s)
{
    for (DisasDelayException *e = s->delay_excp_list; e ; e = e->next) {
        gen_set_label(e->lab);

        /* Restore the insn state to satisfy gen_exception_insn. */
        s->pc_curr = e->pc_curr;
        s->pc_save = e->pc_save;
        s->condexec_cond = e->condexec_cond;
        s->condexec_mask = e->condexec_mask;

        if (e->target_el) {
            gen_exception_insn_el(s, 0, e->excp, e->syn, e->target_el);
        } else {
            gen_exception_insn(s, 0, e->excp, e->syn);
        }
    }
}
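
The pattern used throughout this series is that a translation function asks delay_exception() for a label, branches to it on the failure condition, and lets emit_delayed_exceptions() materialise the exception after the rest of the TB, so the hot path carries no extra jump. A condensed sketch of a caller, modelled on gen_gcspopm() above (gen_check_example and its particular check are hypothetical, for illustration only):

/* Sketch: raise EXCP_UDEF out of line when a value fails a check. */
static void gen_check_example(DisasContext *s, TCGv_i64 value, uint32_t syn)
{
    TCGLabel *fail_label = delay_exception(s, EXCP_UDEF, syn);

    /* Branch to the delayed exception if either of the low two bits is set. */
    tcg_gen_brcondi_i64(TCG_COND_TSTNE, value, 3, fail_label);

    /* Fall through: the common case continues in a straight line;
     * emit_delayed_exceptions() emits the exception code at tb_stop. */
}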

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
    gen_set_condexec(s);

@@ -1723,21 +1774,11 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,

         if (maskbit != 4 && maskbit != 14) {
             /* T4 and T14 are RES0 so never cause traps */
-            TCGv_i32 t;
-            DisasLabel over = gen_disas_label(s);
+            TCGLabel *fail = delay_exception_el(s, EXCP_UDEF, syndrome, 2);
+            TCGv_i32 t =
+                load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2));

-            t = load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2));
-            tcg_gen_andi_i32(t, t, 1u << maskbit);
-            tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label);
-
-            gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
-            /*
-             * gen_exception_insn() will set is_jmp to DISAS_NORETURN,
-             * but since we're conditionally branching over it, we want
-             * to assume continue-to-next-instruction.
-             */
-            s->base.is_jmp = DISAS_NEXT;
-            set_disas_label(s, over);
+            tcg_gen_brcondi_i32(TCG_COND_TSTNE, t, 1u << maskbit, fail);
         }
     }

@@ -5557,11 +5598,10 @@ static bool trans_LE(DisasContext *s, arg_LE *a)

     if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) {
         /* Need to do a runtime check for LTPSIZE != 4 */
-        DisasLabel skipexc = gen_disas_label(s);
+        TCGLabel *fail = delay_exception(s, EXCP_INVSTATE, syn_uncategorized());
+
         tmp = load_cpu_field(v7m.ltpsize);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label);
-        gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
-        set_disas_label(s, skipexc);
+        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 4, fail);
     }

     if (a->f) {

@@ -6791,6 +6831,8 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
            gen_goto_tb(dc, 1, curr_insn_len(dc));
        }
    }

    emit_delayed_exceptions(dc);
}

static const TranslatorOps arm_translator_ops = {

@@ -21,9 +21,25 @@ typedef struct DisasLabel {
    target_ulong pc_save;
} DisasLabel;

/*
 * Emit an exception call out of line: the branch to @lab is emitted
 * inline, the exception itself by emit_delayed_exceptions() after the
 * rest of the TB.
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    target_long pc_curr;
    target_long pc_save;
    int condexec_mask;
    int condexec_cond;
    uint32_t excp;
    uint32_t syn;
    uint32_t target_el;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    const ARMISARegisters *isar;
    DisasDelayException *delay_excp_list;

    /* The address of the current instruction being translated. */
    target_ulong pc_curr;

@@ -166,6 +182,12 @@ typedef struct DisasContext {
    bool fpcr_ah;
    /* True if FPCR.NEP is 1 (FEAT_AFP scalar upper-element result handling) */
    bool fpcr_nep;
    /* True if GCSEnabled. */
    bool gcs_en;
    /* True if GCSReturnValueCheckEnabled. */
    bool gcs_rvcen;
    /* EL to which GCSSTR traps, or 0 if GCSSTR is enabled (no trap). */
    uint8_t gcsstr_el;
    /*
     * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
     * < 0, set by the current instruction.

@@ -359,6 +381,10 @@ void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                           uint32_t syn, uint32_t target_el);
void gen_exception_insn(DisasContext *s, target_long pc_diff,
                        int excp, uint32_t syn);
TCGLabel *delay_exception_el(DisasContext *s, int excp,
                             uint32_t syn, uint32_t target_el);
TCGLabel *delay_exception(DisasContext *s, int excp, uint32_t syn);
void emit_delayed_exceptions(DisasContext *s);

/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)

@@ -372,27 +398,27 @@ static inline TCGv_i32 get_ahp_flag(void)
 }

 /* Set bits within PSTATE. */
-static inline void set_pstate_bits(uint32_t bits)
+static inline void set_pstate_bits(uint64_t bits)
 {
-    TCGv_i32 p = tcg_temp_new_i32();
+    TCGv_i64 p = tcg_temp_new_i64();

     tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

-    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
-    tcg_gen_ori_i32(p, p, bits);
-    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
+    tcg_gen_ld_i64(p, tcg_env, offsetof(CPUARMState, pstate));
+    tcg_gen_ori_i64(p, p, bits);
+    tcg_gen_st_i64(p, tcg_env, offsetof(CPUARMState, pstate));
 }

 /* Clear bits within PSTATE. */
-static inline void clear_pstate_bits(uint32_t bits)
+static inline void clear_pstate_bits(uint64_t bits)
 {
-    TCGv_i32 p = tcg_temp_new_i32();
+    TCGv_i64 p = tcg_temp_new_i64();

     tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

-    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
-    tcg_gen_andi_i32(p, p, ~bits);
-    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
+    tcg_gen_ld_i64(p, tcg_env, offsetof(CPUARMState, pstate));
+    tcg_gen_andi_i64(p, p, ~bits);
+    tcg_gen_st_i64(p, tcg_env, offsetof(CPUARMState, pstate));
 }

 /* If the singlestep state is Active-not-pending, advance to Active-pending. */

@@ -85,8 +85,8 @@ class Aarch64DevicePassthrough(QemuSystemTest):
     # https://docs.kernel.org/driver-api/vfio.html#vfio-device-cde
     ASSET_DEVICE_PASSTHROUGH_STACK = Asset(
         ('https://github.com/pbo-linaro/qemu-linux-stack/'
-         'releases/download/build/device_passthrough-c3fb84a.tar.xz'),
-        '15ac2b02bed0c0ea8e3e007de0bcfdaf6fd51c1ba98213f841dc7d01d6f72f04')
+         'releases/download/build/device_passthrough-a9612a2.tar.xz'),
+        'f7d2f70912e7231986e6e293e1a2c4786dd02bec113a7acb6bfc619e96155455')

     # This tests the device passthrough implementation, by booting a VM
     # supporting it with two nvme disks attached, and launching a nested VM

@@ -25,8 +25,8 @@ class Aarch64RMESbsaRefMachine(QemuSystemTest):
     # ./build.sh && ./archive_artifacts.sh out.tar.xz
     ASSET_RME_STACK_SBSA = Asset(
         ('https://github.com/pbo-linaro/qemu-linux-stack/'
-         'releases/download/build/rme_sbsa_release-a7f02cf.tar.xz'),
-        '27d8400b11befb828d6db0cab97e7ae102d0992c928d3dfbf38b24b6cf6c324c')
+         'releases/download/build/rme_sbsa_release-6a2dfc5.tar.xz'),
+        '5adba482aa069912292a8da746c6b21268224d9d81c97fe7c0bed690579ebdcb')

     # This tests the FEAT_RME cpu implementation, by booting a VM supporting it,
     # and launching a nested VM using it.

@@ -23,8 +23,8 @@ class Aarch64RMEVirtMachine(QemuSystemTest):
     # ./build.sh && ./archive_artifacts.sh out.tar.xz
     ASSET_RME_STACK_VIRT = Asset(
         ('https://github.com/pbo-linaro/qemu-linux-stack/'
-         'releases/download/build/rme_release-86101e5.tar.xz'),
-        'e42fef8439badb52a071ac446fc33cff4cb7d61314c7a28fdbe61a11e1faad3a')
+         'releases/download/build/rme_release-56bc99e.tar.xz'),
+        '0e3dc6b8a4b828dbae09c951a40dcb710eded084b32432b50c69cf4173ffa4be')

     # This tests the FEAT_RME cpu implementation, by booting a VM supporting it,
     # and launching a nested VM using it.

@@ -75,6 +75,11 @@ AARCH64_TESTS += $(SME_TESTS)
$(SME_TESTS): CFLAGS += $(CROSS_AS_HAS_ARMV9_SME)
endif

# GCS Tests
GCS_TESTS += gcsstr gcspushm gcsss
AARCH64_TESTS += $(GCS_TESTS)
$(GCS_TESTS): gcs.h

# System Registers Tests
AARCH64_TESTS += sysregs

tests/tcg/aarch64/gcs.h (new file, 80 lines)

@@ -0,0 +1,80 @@
/*
 * Linux kernel fallback API definitions for GCS and test helpers.
 *
 * Copyright (c) 2025 Linaro Ltd
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>

#ifndef PR_GET_SHADOW_STACK_STATUS
#define PR_GET_SHADOW_STACK_STATUS 74
#endif
#ifndef PR_SET_SHADOW_STACK_STATUS
#define PR_SET_SHADOW_STACK_STATUS 75
#endif
#ifndef PR_LOCK_SHADOW_STACK_STATUS
#define PR_LOCK_SHADOW_STACK_STATUS 76
#endif
#ifndef PR_SHADOW_STACK_ENABLE
# define PR_SHADOW_STACK_ENABLE (1 << 0)
# define PR_SHADOW_STACK_WRITE (1 << 1)
# define PR_SHADOW_STACK_PUSH (1 << 2)
#endif
#ifndef SHADOW_STACK_SET_TOKEN
#define SHADOW_STACK_SET_TOKEN (1 << 0)
#endif
#ifndef SHADOW_STACK_SET_MARKER
#define SHADOW_STACK_SET_MARKER (1 << 1)
#endif
#ifndef SEGV_CPERR
#define SEGV_CPERR 10
#endif
#ifndef __NR_map_shadow_stack
#define __NR_map_shadow_stack 453
#endif

/*
 * These are macros, with the syscall implemented inline, lest we fail
 * the checked return from any function call.
 */
#define enable_gcs(flags) \
    do { \
        register long num __asm__ ("x8") = __NR_prctl; \
        register long arg1 __asm__ ("x0") = PR_SET_SHADOW_STACK_STATUS; \
        register long arg2 __asm__ ("x1") = PR_SHADOW_STACK_ENABLE | flags; \
        register long arg3 __asm__ ("x2") = 0; \
        register long arg4 __asm__ ("x3") = 0; \
        register long arg5 __asm__ ("x4") = 0; \
        asm volatile("svc #0" \
                     : "+r"(arg1) \
                     : "r"(arg2), "r"(arg3), "r"(arg4), "r"(arg5), "r"(num) \
                     : "memory", "cc"); \
        if (arg1) { \
            errno = -arg1; \
            perror("PR_SET_SHADOW_STACK_STATUS"); \
            exit(2); \
        } \
    } while (0)

#define gcspr() \
    ({ uint64_t *r; asm volatile("mrs %0, s3_3_c2_c5_1" : "=r"(r)); r; })

#define gcsss1(val) \
    do { \
        asm volatile("sys #3, c7, c7, #2, %0" : : "r"(val) : "memory"); \
    } while (0)

#define gcsss2() \
    ({ uint64_t *r; \
       asm volatile("sysl %0, #3, c7, c7, #3" : "=r"(r) : : "memory"); r; })
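
A minimal, hypothetical use of these helpers (not one of the tests added below) could look like the sketch that follows; note that it leaves via exit() rather than returning, since main's own return address was never pushed onto the newly enabled shadow stack and returning from it is liable to fail the GCS return check:

/* Sketch: enable GCS for this thread and report the shadow-stack pointer. */
int main(void)
{
    enable_gcs(0);                            /* enable GCS, no extra flags */
    printf("GCSPR_EL0 = %p\n", (void *)gcspr());
    exit(0);            /* do not return: main has no shadow-stack record */
}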
tests/tcg/aarch64/gcspushm.c (new file, 71 lines)

@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "gcs.h"


#define GCSPUSHM "sys #3, c7, c7, #0, %[push]"
#define GCSPOPM "sysl %[pop], #3, c7, c7, #1"

static void test_sigsegv(int sig, siginfo_t *info, void *vuc)
{
    ucontext_t *uc = vuc;
    uint64_t inst_sigsegv;

    __asm__("adr %0, inst_sigsegv" : "=r"(inst_sigsegv));
    assert(uc->uc_mcontext.pc == inst_sigsegv);
    assert(info->si_code == SEGV_CPERR);
    /* TODO: Dig for ESR and verify syndrome. */
    uc->uc_mcontext.pc += 4;
}

static void test_sigill(int sig, siginfo_t *info, void *vuc)
{
    ucontext_t *uc = vuc;
    uint64_t inst_sigill;

    __asm__("adr %0, inst_sigill" : "=r"(inst_sigill));
    assert(uc->uc_mcontext.pc == inst_sigill);
    assert(info->si_code == ILL_ILLOPC);
    uc->uc_mcontext.pc += 4;
}

int main()
{
    struct sigaction sa = { .sa_flags = SA_SIGINFO };
    uint64_t old, new;

    sa.sa_sigaction = test_sigsegv;
    if (sigaction(SIGSEGV, &sa, NULL) < 0) {
        perror("sigaction");
        exit(1);
    }

    sa.sa_sigaction = test_sigill;
    if (sigaction(SIGILL, &sa, NULL) < 0) {
        perror("sigaction");
        exit(1);
    }

    /* Pushm is disabled -- SIGILL via EC_SYSTEMREGISTERTRAP */
    asm volatile("inst_sigill:\t" GCSPUSHM
                 : : [push] "r" (1));

    enable_gcs(PR_SHADOW_STACK_PUSH);

    /* Valid value -- low 2 bits clear */
    old = 0xdeadbeeffeedcaec;
    asm volatile(GCSPUSHM "\n\t" GCSPOPM
                 : [pop] "=r" (new)
                 : [push] "r" (old)
                 : "memory");
    assert(old == new);

    /* Invalid value -- SIGSEGV via EC_GCS */
    asm volatile(GCSPUSHM "\n"
                 "inst_sigsegv:\t" GCSPOPM
                 : [pop] "=r" (new)
                 : [push] "r" (1)
                 : "memory");

    exit(0);
}
tests/tcg/aarch64/gcsss.c (new file, 74 lines)

@@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "gcs.h"

#define IN_PROGRESS(X) ((uint64_t)(X) | 5)
#define CAP(X) (((uint64_t)(X) & ~0xfff) + 1)

static uint64_t * __attribute__((noinline)) recurse(size_t index)
{
    if (index == 0) {
        return gcspr();
    }
    return recurse(index - 1);
}

int main()
{
    void *tmp;
    uint64_t *alt_stack, *alt_cap;
    uint64_t *orig_pr, *orig_cap;
    uint64_t *bottom;
    size_t pagesize = getpagesize();
    size_t words;

    enable_gcs(0);
    orig_pr = gcspr();

    /* Allocate a guard page before and after. */
    tmp = mmap(0, 3 * pagesize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
    assert(tmp != MAP_FAILED);

    /* map_shadow_stack won't replace existing mappings */
    munmap(tmp + pagesize, pagesize);

    /* Allocate a new stack between the guards. */
    alt_stack = (uint64_t *)
        syscall(__NR_map_shadow_stack, tmp + pagesize, pagesize,
                SHADOW_STACK_SET_TOKEN);
    assert(alt_stack == tmp + pagesize);

    words = pagesize / 8;
    alt_cap = alt_stack + words - 1;

    /* SHADOW_STACK_SET_TOKEN set the cap. */
    assert(*alt_cap == CAP(alt_cap));

    /* Swap to the alt stack, one step at a time. */
    gcsss1(alt_cap);

    assert(gcspr() == alt_cap);
    assert(*alt_cap == IN_PROGRESS(orig_pr));

    orig_cap = gcsss2();

    assert(orig_cap == orig_pr - 1);
    assert(*orig_cap == CAP(orig_cap));
    assert(gcspr() == alt_stack + words);

    /* We should be able to use the whole stack. */
    bottom = recurse(words - 1);
    assert(bottom == alt_stack);

    /* We should be back where we started. */
    assert(gcspr() == alt_stack + words);

    /* Swap back to the original stack. */
    gcsss1(orig_cap);
    tmp = gcsss2();

    assert(gcspr() == orig_pr);
    assert(tmp == alt_cap);

    exit(0);
}
tests/tcg/aarch64/gcsstr.c (new file, 48 lines)

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "gcs.h"

/*
 * A single garbage store to the gcs stack.
 * The asm inside must be unique, so disallow inlining.
 */
void __attribute__((noinline))
test_gcsstr(void)
{
    register uint64_t *ptr __asm__("x0") = gcspr();
    /* GCSSTR x1, x0 */
    __asm__("inst_gcsstr: .inst 0xd91f1c01" : : "r"(--ptr));
}

static void test_sigsegv(int sig, siginfo_t *info, void *vuc)
{
    ucontext_t *uc = vuc;
    uint64_t inst_gcsstr;

    __asm__("adr %0, inst_gcsstr" : "=r"(inst_gcsstr));
    assert(uc->uc_mcontext.pc == inst_gcsstr);
    assert(info->si_code == SEGV_CPERR);
    /* TODO: Dig for ESR and verify syndrome. */
    exit(0);
}

int main()
{
    struct sigaction sa = {
        .sa_sigaction = test_sigsegv,
        .sa_flags = SA_SIGINFO,
    };

    /* Enable GCSSTR and test the store succeeds. */
    enable_gcs(PR_SHADOW_STACK_WRITE);
    test_gcsstr();

    /* Disable GCSSTR and test the resulting sigsegv. */
    enable_gcs(0);
    if (sigaction(SIGSEGV, &sa, NULL) < 0) {
        perror("sigaction");
        exit(1);
    }
    test_gcsstr();
    abort();
}