ANDROID: arm64: Reclaim some cpucaps to aid in backporting
Although we padded the cpucaps space for android13-5.10, the move to a generated header in android13-5.15 was missed and consequently we are unable to backport some errata workarounds from upstream without breaking the KMI. To resolve this problem, reclaim some cpucaps allocated by errata workarounds which will never be relevant to Android (i.e. those which apply only to server parts such as Cavium designs and the Neoverse-N1) and which have their use carefully confined to core architecture code. Bug: 248633761 Signed-off-by: Will Deacon <willdeacon@google.com> Change-Id: Id2d9429cae62d97fee44d051318aa5e5b93b29d8
This commit is contained in:
@@ -639,6 +639,7 @@ config ARM64_ERRATUM_1463225
|
||||
config ARM64_ERRATUM_1542419
|
||||
bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
|
||||
default y
|
||||
depends on BROKEN # CPU cap re-allocated by Android
|
||||
help
|
||||
This option adds a workaround for ARM Neoverse-N1 erratum
|
||||
1542419.
|
||||
@@ -822,6 +823,7 @@ config CAVIUM_ERRATUM_23154
|
||||
config CAVIUM_ERRATUM_27456
|
||||
bool "Cavium erratum 27456: Broadcast TLBI instructions may cause icache corruption"
|
||||
default y
|
||||
depends on BROKEN # CPU cap re-allocated by Android
|
||||
help
|
||||
On ThunderX T88 pass 1.x through 2.1 parts, broadcast TLBI
|
||||
instructions may cause the icache to become corrupted if it
|
||||
@@ -833,6 +835,7 @@ config CAVIUM_ERRATUM_27456
|
||||
config CAVIUM_ERRATUM_30115
|
||||
bool "Cavium erratum 30115: Guest may disable interrupts in host"
|
||||
default y
|
||||
depends on BROKEN # CPU cap re-allocated by Android
|
||||
help
|
||||
On ThunderX T88 pass 1.x through 2.2, T81 pass 1.0 through
|
||||
1.2, and T83 Pass 1.0, KVM guest execution may disable
|
||||
@@ -844,6 +847,7 @@ config CAVIUM_ERRATUM_30115
|
||||
config CAVIUM_TX2_ERRATUM_219
|
||||
bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
|
||||
default y
|
||||
depends on BROKEN # CPU caps re-allocated by Android
|
||||
help
|
||||
On Cavium ThunderX2, a load, store or prefetch instruction between a
|
||||
TTBR update and the corresponding context synchronizing operation can
|
||||
|
||||
@@ -98,10 +98,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
|
||||
(arm64_ftr_reg_ctrel0.sys_val & mask))
|
||||
enable_uct_trap = true;
|
||||
|
||||
/* ... or if the system is affected by an erratum */
|
||||
if (cap->capability == ARM64_WORKAROUND_1542419)
|
||||
enable_uct_trap = true;
|
||||
|
||||
if (enable_uct_trap)
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
|
||||
}
|
||||
@@ -182,18 +178,6 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool __maybe_unused
|
||||
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
|
||||
int scope)
|
||||
{
|
||||
u32 midr = read_cpuid_id();
|
||||
bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
|
||||
const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
|
||||
|
||||
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
|
||||
return is_midr_in_range(midr, &range) && has_dic;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
|
||||
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
|
||||
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
|
||||
@@ -216,28 +200,6 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
|
||||
const struct midr_range cavium_erratum_27456_cpus[] = {
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
|
||||
/* Cavium ThunderX, T81 pass 1.0 */
|
||||
MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
|
||||
{},
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_30115
|
||||
static const struct midr_range cavium_erratum_30115_cpus[] = {
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.2 */
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
|
||||
/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
|
||||
MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
|
||||
/* Cavium ThunderX, T83 pass 1.0 */
|
||||
MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
|
||||
{},
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
|
||||
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
|
||||
{
|
||||
@@ -430,20 +392,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_23154,
|
||||
ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
|
||||
{
|
||||
.desc = "Cavium erratum 27456",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_27456,
|
||||
ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_30115
|
||||
{
|
||||
.desc = "Cavium erratum 30115",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_30115,
|
||||
ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
|
||||
},
|
||||
#endif
|
||||
{
|
||||
.desc = "Mismatched cache type (CTR_EL0)",
|
||||
@@ -538,29 +486,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
.midr_range_list = erratum_1463225,
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
|
||||
{
|
||||
.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
|
||||
ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
|
||||
.matches = needs_tx2_tvm_workaround,
|
||||
},
|
||||
{
|
||||
.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
|
||||
ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_ERRATUM_1542419
|
||||
{
|
||||
/* we depend on the firmware portion for correctness */
|
||||
.desc = "ARM erratum 1542419 (kernel portion)",
|
||||
.capability = ARM64_WORKAROUND_1542419,
|
||||
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
|
||||
.matches = has_neoverse_n1_erratum_1542419,
|
||||
.cpu_enable = cpu_enable_trap_ctr_access,
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_ERRATUM_1508412
|
||||
{
|
||||
/* we depend on the firmware portion for correctness */
|
||||
|
||||
@@ -1395,16 +1395,6 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry,
|
||||
return has_sre;
|
||||
}
|
||||
|
||||
static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
|
||||
{
|
||||
u32 midr = read_cpuid_id();
|
||||
|
||||
/* Cavium ThunderX pass 1.x and 2.x */
|
||||
return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
|
||||
MIDR_CPU_VAR_REV(0, 0),
|
||||
MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
|
||||
}
|
||||
|
||||
static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
|
||||
{
|
||||
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
|
||||
@@ -1493,18 +1483,6 @@ bool kaslr_requires_kpti(void)
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Systems affected by Cavium erratum 27456 are incompatible
|
||||
* with KPTI.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
|
||||
extern const struct midr_range cavium_erratum_27456_cpus[];
|
||||
|
||||
if (is_midr_in_range_list(read_cpuid_id(),
|
||||
cavium_erratum_27456_cpus))
|
||||
return false;
|
||||
}
|
||||
|
||||
return kaslr_offset() > 0;
|
||||
}
|
||||
|
||||
@@ -1545,20 +1523,6 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
|
||||
if (!meltdown_safe)
|
||||
__meltdown_safe = false;
|
||||
|
||||
/*
|
||||
* For reasons that aren't entirely clear, enabling KPTI on Cavium
|
||||
* ThunderX leads to apparent I-cache corruption of kernel text, which
|
||||
* ends as well as you might imagine. Don't even try. We cannot rely
|
||||
* on the cpus_have_*cap() helpers here to detect the CPU erratum
|
||||
* because cpucap detection order may change. However, since we know
|
||||
* affected CPUs are always in a homogeneous configuration, it is
|
||||
* safe to rely on this_cpu_has_cap() here.
|
||||
*/
|
||||
if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
|
||||
str = "ARM64_WORKAROUND_CAVIUM_27456";
|
||||
__kpti_forced = -1;
|
||||
}
|
||||
|
||||
/* Useful for KASLR robustness */
|
||||
if (kaslr_requires_kpti()) {
|
||||
if (!__kpti_forced) {
|
||||
@@ -1988,12 +1952,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
.min_field_value = 2,
|
||||
},
|
||||
#endif /* CONFIG_ARM64_LSE_ATOMICS */
|
||||
{
|
||||
.desc = "Software prefetching using PRFM",
|
||||
.capability = ARM64_HAS_NO_HW_PREFETCH,
|
||||
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
|
||||
.matches = has_no_hw_prefetch,
|
||||
},
|
||||
{
|
||||
.desc = "Virtualization Host Extensions",
|
||||
.capability = ARM64_HAS_VIRT_HOST_EXTN,
|
||||
|
||||
@@ -691,9 +691,7 @@ alternative_else_nop_endif
|
||||
tramp_map_kernel x30
|
||||
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
|
||||
tramp_data_read_var x30, vectors
|
||||
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
|
||||
prfm plil1strm, [x30, #(1b - \vector_start)]
|
||||
alternative_else_nop_endif
|
||||
|
||||
msr vbar_el1, x30
|
||||
isb
|
||||
|
||||
@@ -32,15 +32,6 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
|
||||
if (fatal_signal_pending(current))
|
||||
return 0;
|
||||
|
||||
if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
|
||||
/*
|
||||
* The workaround requires an inner-shareable tlbi.
|
||||
* We pick the reserved-ASID to minimise the impact.
|
||||
*/
|
||||
__tlbi(aside1is, __TLBI_VADDR(0, 0));
|
||||
dsb(ish);
|
||||
}
|
||||
|
||||
ret = caches_clean_inval_user_pou(start, start + chunk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -587,15 +587,6 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
|
||||
int rt = ESR_ELx_SYS64_ISS_RT(esr);
|
||||
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
|
||||
|
||||
if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
|
||||
/* Hide DIC so that we can trap the unnecessary maintenance...*/
|
||||
val &= ~BIT(CTR_DIC_SHIFT);
|
||||
|
||||
/* ... and fake IminLine to reduce the number of traps. */
|
||||
val &= ~CTR_IMINLINE_MASK;
|
||||
val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
|
||||
}
|
||||
|
||||
pt_regs_write_reg(regs, rt, val);
|
||||
|
||||
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
|
||||
|
||||
@@ -110,9 +110,6 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 hcr = vcpu->arch.hcr_el2;
|
||||
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
|
||||
hcr |= HCR_TVM;
|
||||
|
||||
write_sysreg(hcr, hcr_el2);
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
|
||||
@@ -203,61 +200,6 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
|
||||
int rt = kvm_vcpu_sys_get_rt(vcpu);
|
||||
u64 val = vcpu_get_reg(vcpu, rt);
|
||||
|
||||
/*
|
||||
* The normal sysreg handling code expects to see the traps,
|
||||
* let's not do anything here.
|
||||
*/
|
||||
if (vcpu->arch.hcr_el2 & HCR_TVM)
|
||||
return false;
|
||||
|
||||
switch (sysreg) {
|
||||
case SYS_SCTLR_EL1:
|
||||
write_sysreg_el1(val, SYS_SCTLR);
|
||||
break;
|
||||
case SYS_TTBR0_EL1:
|
||||
write_sysreg_el1(val, SYS_TTBR0);
|
||||
break;
|
||||
case SYS_TTBR1_EL1:
|
||||
write_sysreg_el1(val, SYS_TTBR1);
|
||||
break;
|
||||
case SYS_TCR_EL1:
|
||||
write_sysreg_el1(val, SYS_TCR);
|
||||
break;
|
||||
case SYS_ESR_EL1:
|
||||
write_sysreg_el1(val, SYS_ESR);
|
||||
break;
|
||||
case SYS_FAR_EL1:
|
||||
write_sysreg_el1(val, SYS_FAR);
|
||||
break;
|
||||
case SYS_AFSR0_EL1:
|
||||
write_sysreg_el1(val, SYS_AFSR0);
|
||||
break;
|
||||
case SYS_AFSR1_EL1:
|
||||
write_sysreg_el1(val, SYS_AFSR1);
|
||||
break;
|
||||
case SYS_MAIR_EL1:
|
||||
write_sysreg_el1(val, SYS_MAIR);
|
||||
break;
|
||||
case SYS_AMAIR_EL1:
|
||||
write_sysreg_el1(val, SYS_AMAIR);
|
||||
break;
|
||||
case SYS_CONTEXTIDR_EL1:
|
||||
write_sysreg_el1(val, SYS_CONTEXTIDR);
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
__kvm_skip_instr(vcpu);
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool esr_is_ptrauth_trap(u32 esr)
|
||||
{
|
||||
switch (esr_sys64_to_sysreg(esr)) {
|
||||
@@ -314,10 +256,6 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
|
||||
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
|
||||
handle_tx2_tvm(vcpu))
|
||||
return true;
|
||||
|
||||
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
|
||||
__vgic_v3_perform_cpuif_access(vcpu) == 1)
|
||||
return true;
|
||||
|
||||
@@ -683,11 +683,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
|
||||
if (kvm_vgic_global_state.vcpu_base == 0)
|
||||
kvm_info("disabling GICv2 emulation\n");
|
||||
|
||||
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
|
||||
group0_trap = true;
|
||||
group1_trap = true;
|
||||
}
|
||||
|
||||
if (vgic_v3_broken_seis()) {
|
||||
kvm_info("GICv3 with broken locally generated SEI\n");
|
||||
|
||||
|
||||
@@ -18,13 +18,6 @@
|
||||
* x1 - src
|
||||
*/
|
||||
SYM_FUNC_START_PI(copy_page)
|
||||
alternative_if ARM64_HAS_NO_HW_PREFETCH
|
||||
// Prefetch three cache lines ahead.
|
||||
prfm pldl1strm, [x1, #128]
|
||||
prfm pldl1strm, [x1, #256]
|
||||
prfm pldl1strm, [x1, #384]
|
||||
alternative_else_nop_endif
|
||||
|
||||
ldp x2, x3, [x1]
|
||||
ldp x4, x5, [x1, #16]
|
||||
ldp x6, x7, [x1, #32]
|
||||
@@ -39,10 +32,6 @@ alternative_else_nop_endif
|
||||
1:
|
||||
tst x0, #(PAGE_SIZE - 1)
|
||||
|
||||
alternative_if ARM64_HAS_NO_HW_PREFETCH
|
||||
prfm pldl1strm, [x1, #384]
|
||||
alternative_else_nop_endif
|
||||
|
||||
stnp x2, x3, [x0, #-256]
|
||||
ldp x2, x3, [x1]
|
||||
stnp x4, x5, [x0, #16 - 256]
|
||||
|
||||
@@ -338,12 +338,7 @@ EXPORT_SYMBOL_GPL(arm64_mm_context_put);
|
||||
/* Errata workaround post TTBRx_EL1 update. */
|
||||
asmlinkage void post_ttbr_update_workaround(void)
|
||||
{
|
||||
if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
|
||||
return;
|
||||
|
||||
asm(ALTERNATIVE("nop; nop; nop",
|
||||
"ic iallu; dsb nsh; isb",
|
||||
ARM64_WORKAROUND_CAVIUM_27456));
|
||||
return;
|
||||
}
|
||||
|
||||
void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
#
|
||||
# Internal CPU capabilities constants, keep this list sorted
|
||||
# Internal CPU capabilities constants
|
||||
|
||||
BTI
|
||||
# Unreliable: use system_supports_32bit_el0() instead.
|
||||
@@ -26,7 +26,8 @@ HAS_IRQ_PRIO_MASKING
|
||||
HAS_LDAPR
|
||||
HAS_LSE_ATOMICS
|
||||
HAS_NO_FPSIMD
|
||||
HAS_NO_HW_PREFETCH
|
||||
# HAS_NO_HW_PREFETCH
|
||||
ANDROID_RESERVED_1
|
||||
HAS_PAN
|
||||
HAS_RAS_EXTN
|
||||
HAS_RNG
|
||||
@@ -54,15 +55,20 @@ WORKAROUND_858921
|
||||
WORKAROUND_1418040
|
||||
WORKAROUND_1463225
|
||||
WORKAROUND_1508412
|
||||
WORKAROUND_1542419
|
||||
# WORKAROUND_1542419
|
||||
ANDROID_RESERVED_2
|
||||
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
|
||||
WORKAROUND_TSB_FLUSH_FAILURE
|
||||
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
|
||||
WORKAROUND_CAVIUM_23154
|
||||
WORKAROUND_CAVIUM_27456
|
||||
WORKAROUND_CAVIUM_30115
|
||||
WORKAROUND_CAVIUM_TX2_219_PRFM
|
||||
WORKAROUND_CAVIUM_TX2_219_TVM
|
||||
# WORKAROUND_CAVIUM_27456
|
||||
ANDROID_RESERVED_3
|
||||
# WORKAROUND_CAVIUM_30115
|
||||
ANDROID_RESERVED_4
|
||||
# WORKAROUND_CAVIUM_TX2_219_PRFM
|
||||
ANDROID_RESERVED_5
|
||||
# WORKAROUND_CAVIUM_TX2_219_TVM
|
||||
ANDROID_RESERVED_6
|
||||
WORKAROUND_CLEAN_CACHE
|
||||
WORKAROUND_DEVICE_LOAD_ACQUIRE
|
||||
WORKAROUND_NVIDIA_CARMEL_CNP
|
||||
|
||||
Reference in New Issue
Block a user