Commit 591effbd authored by Lorenzo "Palinuro" Faletra

Import Upstream version 4.17.17

parent 156ae521
@@ -315,6 +315,18 @@ static inline bool kvm_arm_harden_branch_predictor(void)
return false;
}
#define KVM_SSBD_UNKNOWN -1
#define KVM_SSBD_FORCE_DISABLE 0
#define KVM_SSBD_KERNEL 1
#define KVM_SSBD_FORCE_ENABLE 2
#define KVM_SSBD_MITIGATED 3
static inline int kvm_arm_have_ssbd(void)
{
/* No way to detect it yet, pretend it is not there. */
return KVM_SSBD_UNKNOWN;
}
static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
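This hunk is the 32-bit ARM side of the series: with no detection mechanism available there, the stub reports KVM_SSBD_UNKNOWN, giving common KVM code a single kvm_arm_have_ssbd() hook that works on both architectures; the arm64 version added further down maps the real, firmware-probed state.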
@@ -335,6 +335,11 @@ static inline int kvm_map_vectors(void)
return 0;
}
static inline int hyp_map_aux_data(void)
{
return 0;
}
#define kvm_phys_to_vttbr(addr) (addr)
#endif /* !__ASSEMBLY__ */
@@ -708,7 +708,7 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
}
/* dst = dst >> src */
-static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
+static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
bool sstk, struct jit_ctx *ctx) {
const u8 *tmp = bpf2a32[TMP_REG_1];
const u8 *tmp2 = bpf2a32[TMP_REG_2];
@@ -724,7 +724,7 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
}
-/* Do LSH operation */
+/* Do RSH operation */
emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
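The sequence above (it continues past this excerpt) builds a 64-bit logical right shift out of 32-bit register operations: RSB computes 32 - shift, SUBS computes shift - 32, and because an ARM register-specified shift of 32 or more yields zero, OR-ing the partial results covers every shift amount from 0 to 63 without a branch. A standalone C model of that decomposition (illustrative userspace code, not kernel source):

#include <assert.h>
#include <stdint.h>

/* ARM register-specified shifts yield 0 for amounts >= 32; C leaves such
 * shifts undefined, so model the hardware behaviour explicitly. */
static uint32_t lsr32(uint32_t x, uint32_t n) { return n >= 32 ? 0 : x >> n; }
static uint32_t lsl32(uint32_t x, uint32_t n) { return n >= 32 ? 0 : x << n; }

/* dst = dst >> n on a {hi,lo} register pair. "32 - n" and "n - 32" wrap
 * around as unsigned values, just as the RSB/SUBS results do, and the
 * >= 32 rule zeroes whichever partial term does not apply. */
static uint64_t rsh64(uint32_t hi, uint32_t lo, uint32_t n)
{
	uint32_t new_lo = lsr32(lo, n) | lsl32(hi, 32 - n) | lsr32(hi, n - 32);

	return ((uint64_t)lsr32(hi, n) << 32) | new_lo;
}

int main(void)
{
	const uint64_t v = 0x123456789abcdef0ULL;
	uint32_t n;

	for (n = 0; n < 64; n++)
		assert(rsh64(v >> 32, (uint32_t)v, n) == v >> n);
	return 0;
}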
@@ -774,7 +774,7 @@ static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
}
/* dst = dst >> val */
-static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk,
+static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk,
const u32 val, struct jit_ctx *ctx) {
const u8 *tmp = bpf2a32[TMP_REG_1];
const u8 *tmp2 = bpf2a32[TMP_REG_2];
@@ -1330,7 +1330,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_ALU64 | BPF_RSH | BPF_K:
if (unlikely(imm > 63))
return -EINVAL;
-emit_a32_lsr_i64(dst, dstk, imm, ctx);
+emit_a32_rsh_i64(dst, dstk, imm, ctx);
break;
/* dst = dst << src */
case BPF_ALU64 | BPF_LSH | BPF_X:
@@ -1338,7 +1338,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
break;
/* dst = dst >> src */
case BPF_ALU64 | BPF_RSH | BPF_X:
-emit_a32_lsr_r64(dst, src, dstk, sstk, ctx);
+emit_a32_rsh_r64(dst, src, dstk, sstk, ctx);
break;
/* dst = dst >> src (signed) */
case BPF_ALU64 | BPF_ARSH | BPF_X:
@@ -1928,7 +1928,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* there are 2 passes here */
bpf_jit_dump(prog->len, image_size, 2, ctx.target);
-set_memory_ro((unsigned long)header, header->pages);
+bpf_jit_binary_lock_ro(header);
prog->bpf_func = (void *)ctx.target;
prog->jited = 1;
prog->jited_len = image_size;
@@ -938,6 +938,18 @@ config HARDEN_EL2_VECTORS
If unsure, say Y.
config ARM64_SSBD
bool "Speculative Store Bypass Disable" if EXPERT
default y
help
This enables mitigation of the bypassing of previous stores
by speculative loads.
If unsure, say Y.
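Even when the option is built in, the policy is chosen at boot: the ssbd= parameter handled by ssbd_cfg() later in this commit accepts force-on, force-off and kernel, with ssbd_state defaulting to ARM64_SSBD_KERNEL, i.e. dynamic toggling on kernel entry/exit and per task.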
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
@@ -93,20 +93,12 @@ reg_12v: regulator2 {
regulator-always-on;
};
-rsnd_ak4613: sound {
-compatible = "simple-audio-card";
+sound_card: sound {
+compatible = "audio-graph-card";
-simple-audio-card,format = "left_j";
-simple-audio-card,bitclock-master = <&sndcpu>;
-simple-audio-card,frame-master = <&sndcpu>;
+label = "rcar-sound";
-sndcpu: simple-audio-card,cpu {
-sound-dai = <&rcar_sound>;
-};
-sndcodec: simple-audio-card,codec {
-sound-dai = <&ak4613>;
-};
+dais = <&rsnd_port0>;
};
vbus0_usb2: regulator-vbus0-usb2 {
@@ -322,6 +314,12 @@ ak4613: codec@10 {
asahi-kasei,out4-single-end;
asahi-kasei,out5-single-end;
asahi-kasei,out6-single-end;
port {
ak4613_endpoint: endpoint {
remote-endpoint = <&rsnd_endpoint0>;
};
};
};
cs2000: clk_multiplier@4f {
@@ -581,10 +579,18 @@ &rcar_sound {
<&audio_clk_c>,
<&cpg CPG_CORE CPG_AUDIO_CLK_I>;
-rcar_sound,dai {
-dai0 {
-playback = <&ssi0 &src0 &dvc0>;
-capture = <&ssi1 &src1 &dvc1>;
+ports {
+rsnd_port0: port@0 {
+rsnd_endpoint0: endpoint {
+remote-endpoint = <&ak4613_endpoint>;
+dai-format = "left_j";
+bitclock-master = <&rsnd_endpoint0>;
+frame-master = <&rsnd_endpoint0>;
+playback = <&ssi0 &src0 &dvc0>;
+capture = <&ssi1 &src1 &dvc1>;
+};
+};
};
};
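Together with the sound-node and codec hunks above, this converts the board from simple-audio-card to audio-graph-card: the card node shrinks to a label plus a dais phandle, the format and clock/frame-master properties move into the endpoint (as dai-format, bitclock-master and frame-master on rsnd_endpoint0), and the CPU-to-codec link is now the mutual remote-endpoint pairing between ak4613_endpoint and rsnd_endpoint0.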
@@ -333,6 +333,8 @@ CONFIG_GPIO_XGENE_SB=y
CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_PCA953X_IRQ=y
CONFIG_GPIO_MAX77620=y
CONFIG_POWER_AVS=y
CONFIG_ROCKCHIP_IODOMAIN=y
CONFIG_POWER_RESET_MSM=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
@@ -204,7 +204,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \
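The two instructions this hunk adds form an event-register drain: sevl sets the local event register and the first wfe consumes it, so the register is guaranteed to be clear when ldxr arms the exclusive monitor. The final wfe of the sequence then sleeps until the monitored location is actually written (or another event or interrupt arrives), instead of falling straight through on a stale event.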
unsigned long tmp; \
\
asm volatile( \
-" ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
+" sevl\n" \
+" wfe\n" \
+" ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
" eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
" cbnz %" #w "[tmp], 1f\n" \
" wfe\n" \
@@ -48,7 +48,8 @@
#define ARM64_HAS_CACHE_IDC 27
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
+#define ARM64_SSBD 30
-#define ARM64_NCAPS 30
+#define ARM64_NCAPS 31
#endif /* __ASM_CPUCAPS_H */
@@ -537,6 +537,28 @@ static inline u64 read_zcr_features(void)
return zcr;
}
#define ARM64_SSBD_UNKNOWN -1
#define ARM64_SSBD_FORCE_DISABLE 0
#define ARM64_SSBD_KERNEL 1
#define ARM64_SSBD_FORCE_ENABLE 2
#define ARM64_SSBD_MITIGATED 3
static inline int arm64_get_ssbd_state(void)
{
#ifdef CONFIG_ARM64_SSBD
extern int ssbd_state;
return ssbd_state;
#else
return ARM64_SSBD_UNKNOWN;
#endif
}
#ifdef CONFIG_ARM64_SSBD
void arm64_set_ssbd_mitigation(bool state);
#else
static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif
#endif /* __ASSEMBLY__ */
#endif
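arm64_get_ssbd_state() is the hook the rest of the series branches on: kvm_arm_have_ssbd() further down translates its value one-to-one for KVM, and arm64_enable_wa2_handling() in cpu_errata.c consults it to decide whether the dynamic toggle in entry.S gets patched in.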
@@ -33,6 +33,9 @@
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
/* Translate a kernel address of @sym into its equivalent linear mapping */
#define kvm_ksym_ref(sym) \
({ \
@@ -71,14 +74,37 @@ extern u32 __kvm_get_mdcr_el2(void);
extern u32 __init_stage2_translation(void);
/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
#define __hyp_this_cpu_ptr(sym) \
({ \
void *__ptr = hyp_symbol_addr(sym); \
__ptr += read_sysreg(tpidr_el2); \
(typeof(&sym))__ptr; \
})
#define __hyp_this_cpu_read(sym) \
({ \
*__hyp_this_cpu_ptr(sym); \
})
#else /* __ASSEMBLY__ */
-.macro get_host_ctxt reg, tmp
-adr_l \reg, kvm_host_cpu_state
+.macro hyp_adr_this_cpu reg, sym, tmp
+adr_l \reg, \sym
mrs \tmp, tpidr_el2
add \reg, \reg, \tmp
.endm
+.macro hyp_ldr_this_cpu reg, sym, tmp
+adr_l \reg, \sym
+mrs \tmp, tpidr_el2
+ldr \reg, [\reg, \tmp]
+.endm
+.macro get_host_ctxt reg, tmp
+hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+.endm
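The C and asm helpers compute the same thing: the address of a per-CPU variable at EL2, where tpidr_el2 holds this CPU's per-CPU offset. A sketch of the kind of hyp-side consumer this enables (hyp/switch.c is not in this excerpt; names and logic are modelled on it, not quoted):

static bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	/* Per-CPU flag, reachable from EL2 via tpidr_el2 and the hyp
	 * mapping established by hyp_map_aux_data() below. */
	if (!__hyp_this_cpu_read(arm64_ssbd_callback_required))
		return false;

	/* Guest has disabled the mitigation via ARCH_WORKAROUND_2. */
	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}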
.macro get_vcpu_ptr vcpu, ctxt
get_host_ctxt \ctxt, \vcpu
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
@@ -216,6 +216,9 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
/* State of various workarounds, see kvm_asm.h for bit assignment */
u64 workaround_flags;
/* Guest debug state */
u64 debug_flags;
@@ -452,6 +455,29 @@ static inline bool kvm_arm_harden_branch_predictor(void)
return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}
#define KVM_SSBD_UNKNOWN -1
#define KVM_SSBD_FORCE_DISABLE 0
#define KVM_SSBD_KERNEL 1
#define KVM_SSBD_FORCE_ENABLE 2
#define KVM_SSBD_MITIGATED 3
static inline int kvm_arm_have_ssbd(void)
{
switch (arm64_get_ssbd_state()) {
case ARM64_SSBD_FORCE_DISABLE:
return KVM_SSBD_FORCE_DISABLE;
case ARM64_SSBD_KERNEL:
return KVM_SSBD_KERNEL;
case ARM64_SSBD_FORCE_ENABLE:
return KVM_SSBD_FORCE_ENABLE;
case ARM64_SSBD_MITIGATED:
return KVM_SSBD_MITIGATED;
case ARM64_SSBD_UNKNOWN:
default:
return KVM_SSBD_UNKNOWN;
}
}
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
@@ -473,6 +473,30 @@ static inline int kvm_map_vectors(void)
}
#endif
#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
static inline int hyp_map_aux_data(void)
{
int cpu, err;
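/* Map each CPU's 8-byte arm64_ssbd_callback_required slot into the EL2
 * address space ("ptr + 1" is u64 pointer arithmetic, so each mapping
 * covers exactly that one flag); __hyp_this_cpu_read() relies on this. */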
for_each_possible_cpu(cpu) {
u64 *ptr;
ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
if (err)
return err;
}
return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
return 0;
}
#endif
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
#endif /* __ASSEMBLY__ */
@@ -94,6 +94,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_32BIT 22 /* 32bit process */
#define TIF_SVE 23 /* Scalable Vector Extension in use */
#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
#define TIF_SSBD 25 /* Wants SSB mitigation */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
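ssbd.c itself is not part of this excerpt; upstream it supplies the PR_SPEC_STORE_BYPASS prctl backend that sets and clears the TIF_SSBD flag tested by apply_ssbd in entry.S, which is what makes the mitigation controllable per task.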
@@ -136,6 +136,7 @@ int main(void)
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
@@ -232,6 +232,178 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static const struct ssbd_options {
const char *str;
int state;
} ssbd_options[] = {
{ "force-on", ARM64_SSBD_FORCE_ENABLE, },
{ "force-off", ARM64_SSBD_FORCE_DISABLE, },
{ "kernel", ARM64_SSBD_KERNEL, },
};
static int __init ssbd_cfg(char *buf)
{
int i;
if (!buf || !buf[0])
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
int len = strlen(ssbd_options[i].str);
if (strncmp(buf, ssbd_options[i].str, len))
continue;
ssbd_state = ssbd_options[i].state;
return 0;
}
return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr,
int nr_inst)
{
u32 insn;
BUG_ON(nr_inst != 1);
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
insn = aarch64_insn_get_hvc_value();
break;
case PSCI_CONDUIT_SMC:
insn = aarch64_insn_get_smc_value();
break;
default:
return;
}
*updptr = cpu_to_le32(insn);
}
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr,
int nr_inst)
{
BUG_ON(nr_inst != 1);
/*
* Only allow mitigation on EL1 entry/exit and guest
* ARCH_WORKAROUND_2 handling if the SSBD state allows it to
* be flipped.
*/
if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
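Both callbacks are boot-time patches aimed at the apply_ssbd macro added to entry.S below: arm64_update_smccc_conduit() rewrites the macro's placeholder nop into hvc #0 or smc #0 to match the PSCI conduit, while arm64_enable_wa2_handling() nops out the macro's leading branch, and thereby activates the whole sequence, only when ssbd_state is ARM64_SSBD_KERNEL, i.e. when the mitigation is meant to be flipped dynamically on kernel entry/exit.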
void arm64_set_ssbd_mitigation(bool state)
{
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
break;
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
break;
default:
WARN_ON_ONCE(1);
break;
}
}
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
int scope)
{
struct arm_smccc_res res;
bool required = true;
s32 val;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
ssbd_state = ARM64_SSBD_UNKNOWN;
return false;
}
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_2, &res);
break;
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_2, &res);
break;
default:
ssbd_state = ARM64_SSBD_UNKNOWN;
return false;
}
val = (s32)res.a0;
switch (val) {
case SMCCC_RET_NOT_SUPPORTED:
ssbd_state = ARM64_SSBD_UNKNOWN;
return false;
case SMCCC_RET_NOT_REQUIRED:
pr_info_once("%s mitigation not required\n", entry->desc);
ssbd_state = ARM64_SSBD_MITIGATED;
return false;
case SMCCC_RET_SUCCESS:
required = true;
break;
case 1: /* Mitigation not required on this CPU */
required = false;
break;
default:
WARN_ON(1);
return false;
}
switch (ssbd_state) {
case ARM64_SSBD_FORCE_DISABLE:
pr_info_once("%s disabled from command-line\n", entry->desc);
arm64_set_ssbd_mitigation(false);
required = false;
break;
case ARM64_SSBD_KERNEL:
if (required) {
__this_cpu_write(arm64_ssbd_callback_required, 1);
arm64_set_ssbd_mitigation(true);
}
break;
case ARM64_SSBD_FORCE_ENABLE:
pr_info_once("%s forced from command-line\n", entry->desc);
arm64_set_ssbd_mitigation(true);
required = true;
break;
default:
WARN_ON(1);
break;
}
return required;
}
#endif /* CONFIG_ARM64_SSBD */
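has_ssbd_mitigation() folds two inputs into a final state: what firmware answers for ARCH_WORKAROUND_2 and what the ssbd= policy requests. A compact userspace model of that decision table (illustration only; the SMCCC return values mirror include/linux/arm-smccc.h):

#include <stdio.h>

enum {
	SSBD_UNKNOWN = -1,
	SSBD_FORCE_DISABLE,
	SSBD_KERNEL,
	SSBD_FORCE_ENABLE,
	SSBD_MITIGATED,
};

/* fw is the ARCH_WORKAROUND_2 discovery result: -1 SMCCC_RET_NOT_SUPPORTED,
 * -2 SMCCC_RET_NOT_REQUIRED (never needed on this system), 0 SMCCC_RET_SUCCESS
 * (this CPU needs the toggle), 1 (not needed on this particular CPU). */
static int resolve(int fw, int policy, int *required)
{
	*required = 0;
	if (fw == -1)
		return SSBD_UNKNOWN;	/* no firmware support: cannot mitigate */
	if (fw == -2)
		return SSBD_MITIGATED;	/* safe by construction, nothing to do */
	*required = (fw == 0);
	if (policy == SSBD_FORCE_DISABLE)
		*required = 0;		/* ssbd=force-off overrides firmware */
	if (policy == SSBD_FORCE_ENABLE)
		*required = 1;		/* ssbd=force-on, even if CPU says no */
	return policy;
}

int main(void)
{
	int required;
	int state = resolve(0, SSBD_KERNEL, &required);

	printf("state=%d, this CPU requires callback: %d\n", state, required);
	return 0;
}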
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -487,6 +659,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
},
#endif
#ifdef CONFIG_ARM64_SSBD
{
.desc = "Speculative Store Bypass Disable",
.capability = ARM64_SSBD,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_ssbd_mitigation,
},
#endif
{
}
@@ -18,6 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
@@ -137,6 +138,25 @@ alternative_else_nop_endif
add \dst, \dst, #(\sym - .entry.tramp.text)
.endm
// This macro corrupts x0-x3. It is the caller's duty
// to save/restore them if required.
.macro apply_ssbd, state, targ, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb arm64_enable_wa2_handling
b \targ
alternative_cb_end
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
cbz \tmp2, \targ
ldr \tmp2, [tsk, #TSK_TI_FLAGS]
tbnz \tmp2, #TIF_SSBD, \targ
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
alternative_cb arm64_update_smccc_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
#endif
.endm
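Reading apply_ssbd top to bottom: the alternative branch skips the whole sequence unless dynamic handling was enabled by arm64_enable_wa2_handling(); the per-CPU arm64_ssbd_callback_required load skips CPUs whose firmware never asked for the callback; the TIF_SSBD test honours the per-task setting; only then is ARCH_WORKAROUND_2 invoked through the patched conduit instruction, with \state = 1 on kernel entry and 0 on exit (see the kernel_entry and "3:" hunks below).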
.macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
@@ -163,6 +183,14 @@ alternative_else_nop_endif
ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
apply_ssbd 1, 1f, x22, x23
#ifdef CONFIG_ARM64_SSBD
ldp x0, x1, [sp, #16 * 0]
ldp x2, x3, [sp, #16 * 1]
#endif
1:
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
@@ -303,6 +331,8 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
apply_ssbd 0, 5f, x0, x1