target/loongarch: Add present and write bit with pte entry
With the hardware PTW feature enabled, the Present bit and Write bit are checked by hardware, rather than the Valid bit and Dirty bit. Bit P means that the page is valid and present, and bit W means that the page is writable. The original V bit is treated as an access bit: hardware sets this bit if there is a read or write access. The D bit is updated by hardware if there is a write access. Signed-off-by: Bibo Mao <maobibo@loongson.cn> Reviewed-by: Song Gao <gaosong@loongson.cn>
This commit is contained in:
parent
5cba5a518b
commit
7b7c6fae9b
4 changed files with 45 additions and 11 deletions
|
|
@ -70,6 +70,8 @@ FIELD(TLBENTRY, PLV, 2, 2)
|
|||
FIELD(TLBENTRY, MAT, 4, 2)
|
||||
FIELD(TLBENTRY, G, 6, 1)
|
||||
FIELD(TLBENTRY, HUGE, 6, 1)
|
||||
FIELD(TLBENTRY, P, 7, 1)
|
||||
FIELD(TLBENTRY, W, 8, 1)
|
||||
FIELD(TLBENTRY, HGLOBAL, 12, 1)
|
||||
FIELD(TLBENTRY, LEVEL, 13, 2)
|
||||
FIELD(TLBENTRY_32, PPN, 8, 24)
|
||||
|
|
|
|||
|
|
@ -27,6 +27,37 @@ typedef struct MMUContext {
|
|||
int prot;
|
||||
} MMUContext;
|
||||
|
||||
/* Report whether the hardware page table walker (HPTW) is enabled. */
static inline bool cpu_has_ptw(CPULoongArchState *env)
{
    /* CSR_PWCH.HPTW_EN is the hardware PTW enable control. */
    return FIELD_EX64(env->CSR_PWCH, CSR_PWCH, HPTW_EN) != 0;
}
|
||||
|
||||
/*
 * Return true if the PTE describes a present page.
 *
 * With hardware PTW enabled the P bit marks presence; in the classic
 * software-managed scheme the V (valid) bit plays that role.
 */
static inline bool pte_present(CPULoongArchState *env, uint64_t entry)
{
    if (cpu_has_ptw(env)) {
        return FIELD_EX64(entry, TLBENTRY, P) != 0;
    }

    return FIELD_EX64(entry, TLBENTRY, V) != 0;
}
|
||||
|
||||
/*
 * Return true if the PTE permits write access.
 *
 * With hardware PTW enabled the W bit grants write permission; in the
 * classic software-managed scheme the D (dirty) bit is checked instead.
 */
static inline bool pte_write(CPULoongArchState *env, uint64_t entry)
{
    if (cpu_has_ptw(env)) {
        return FIELD_EX64(entry, TLBENTRY, W) != 0;
    }

    return FIELD_EX64(entry, TLBENTRY, D) != 0;
}
|
||||
|
||||
bool check_ps(CPULoongArchState *ent, uint8_t ps);
|
||||
TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
|
||||
MMUAccessType access_type, int mmu_idx);
|
||||
|
|
|
|||
|
|
@ -49,12 +49,13 @@ TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
|
|||
{
|
||||
uint64_t plv = mmu_idx;
|
||||
uint64_t tlb_entry, tlb_ppn;
|
||||
uint8_t tlb_ps, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
|
||||
uint8_t tlb_ps, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
|
||||
bool tlb_v, tlb_d;
|
||||
|
||||
tlb_entry = context->pte;
|
||||
tlb_ps = context->ps;
|
||||
tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
|
||||
tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
|
||||
tlb_v = pte_present(env, tlb_entry);
|
||||
tlb_d = pte_write(env, tlb_entry);
|
||||
tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
|
||||
if (is_la64(env)) {
|
||||
tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
|
||||
|
|
|
|||
|
|
@ -114,9 +114,8 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index)
|
|||
uint8_t tlb_ps;
|
||||
LoongArchTLB *tlb = &env->tlb[index];
|
||||
int idxmap = BIT(MMU_KERNEL_IDX) | BIT(MMU_USER_IDX);
|
||||
uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
|
||||
uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
|
||||
uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
|
||||
bool tlb_v;
|
||||
|
||||
tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
|
||||
pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
|
||||
|
|
@ -124,12 +123,14 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index)
|
|||
addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;
|
||||
addr = sextract64(addr, 0, TARGET_VIRT_ADDR_SPACE_BITS);
|
||||
|
||||
if (tlb_v0) {
|
||||
tlb_v = pte_present(env, tlb->tlb_entry0);
|
||||
if (tlb_v) {
|
||||
tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
|
||||
idxmap, TARGET_LONG_BITS);
|
||||
}
|
||||
|
||||
if (tlb_v1) {
|
||||
tlb_v = pte_present(env, tlb->tlb_entry1);
|
||||
if (tlb_v) {
|
||||
tlb_flush_range_by_mmuidx(env_cpu(env), addr + pagesize, pagesize,
|
||||
idxmap, TARGET_LONG_BITS);
|
||||
}
|
||||
|
|
@ -335,8 +336,7 @@ void helper_tlbwr(CPULoongArchState *env)
|
|||
{
|
||||
int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
|
||||
LoongArchTLB *old, new = {};
|
||||
bool skip_inv = false;
|
||||
uint8_t tlb_v0, tlb_v1;
|
||||
bool skip_inv = false, tlb_v0, tlb_v1;
|
||||
|
||||
old = env->tlb + index;
|
||||
if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
|
||||
|
|
@ -348,8 +348,8 @@ void helper_tlbwr(CPULoongArchState *env)
|
|||
/* Check whether ASID/VPPN is the same */
|
||||
if (old->tlb_misc == new.tlb_misc) {
|
||||
/* Check whether both even/odd pages is the same or invalid */
|
||||
tlb_v0 = FIELD_EX64(old->tlb_entry0, TLBENTRY, V);
|
||||
tlb_v1 = FIELD_EX64(old->tlb_entry1, TLBENTRY, V);
|
||||
tlb_v0 = pte_present(env, old->tlb_entry0);
|
||||
tlb_v1 = pte_present(env, old->tlb_entry1);
|
||||
if ((!tlb_v0 || new.tlb_entry0 == old->tlb_entry0) &&
|
||||
(!tlb_v1 || new.tlb_entry1 == old->tlb_entry1)) {
|
||||
skip_inv = true;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue