Commit a9d2e102 authored by Lorenzo "Palinuro" Faletra

Import Debian changes 4.19.37-2parrot1.37t

linux (4.19.37-2parrot1.37t) testing; urgency=medium

  * Import new Debian release.

linux (4.19.37-2) unstable; urgency=high

  * debian/bin: Fix Python static checker regressions (Closes: #928618)
  * Clean up speculation mitigations:
    - Documentation/l1tf: Fix small spelling typo
    - x86/cpu: Sanitize FAM6_ATOM naming
    - kvm: x86: Report STIBP on GET_SUPPORTED_CPUID
    - x86/msr-index: Cleanup bit defines
    - x86/speculation: Consolidate CPU whitelists
    - Documentation: Move L1TF to separate directory
    - cpu/speculation: Add 'mitigations=' cmdline option
    - x86/speculation: Support 'mitigations=' cmdline option
    - powerpc/speculation: Support 'mitigations=' cmdline option
    - s390/speculation: Support 'mitigations=' cmdline option
    - x86/speculation/mds: Add 'mitigations=' support for MDS
  * [x86] Mitigate Microarchitectural Data Sampling (MDS) vulnerabilities
    (CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091):
 ...
parents d91a0162 2c5c995f
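
Note for readers of the changelog above: the 'mitigations=' switch added by this series is a boot-time kernel parameter (documented upstream in kernel-parameters.txt with the values off, auto and auto,nosmt). A minimal, illustrative sketch of setting it on a Debian-style system follows; the GRUB snippet is an assumption for illustration and is not part of this commit:

    # /etc/default/grub (illustrative only, not part of this diff)
    # "auto,nosmt" additionally disables SMT where a mitigation such as MDS or L1TF requires it.
    GRUB_CMDLINE_LINUX_DEFAULT="quiet mitigations=auto,nosmt"
    # Apply with: update-grub, then reboot.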
@@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int length)
 {
 	u16 *crc = shash_desc_ctx(desc);
-	unsigned int l;
 
-	if (!may_use_simd()) {
-		*crc = crc_t10dif_generic(*crc, data, length);
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+		kernel_neon_begin();
+		*crc = crc_t10dif_pmull(*crc, data, length);
+		kernel_neon_end();
 	} else {
-		if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
-			l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
-				  ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
-			*crc = crc_t10dif_generic(*crc, data, l);
-			length -= l;
-			data += l;
-		}
-
-		if (length > 0) {
-			kernel_neon_begin();
-			*crc = crc_t10dif_pmull(*crc, data, length);
-			kernel_neon_end();
-		}
+		*crc = crc_t10dif_generic(*crc, data, length);
 	}
 
 	return 0;
 }
@@ -212,10 +212,11 @@ K256:
 .global	sha256_block_data_order
 .type	sha256_block_data_order,%function
 sha256_block_data_order:
+.Lsha256_block_data_order:
 #if __ARM_ARCH__<7
 	sub	r3,pc,#8		@ sha256_block_data_order
 #else
-	adr	r3,sha256_block_data_order
+	adr	r3,.Lsha256_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 	ldr	r12,.LOPENSSL_armcap
@@ -93,10 +93,11 @@ K256:
 .global	sha256_block_data_order
 .type	sha256_block_data_order,%function
 sha256_block_data_order:
+.Lsha256_block_data_order:
 #if __ARM_ARCH__<7
 	sub	r3,pc,#8		@ sha256_block_data_order
 #else
-	adr	r3,sha256_block_data_order
+	adr	r3,.Lsha256_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 	ldr	r12,.LOPENSSL_armcap
@@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
 .global	sha512_block_data_order
 .type	sha512_block_data_order,%function
 sha512_block_data_order:
+.Lsha512_block_data_order:
 #if __ARM_ARCH__<7
 	sub	r3,pc,#8		@ sha512_block_data_order
 #else
-	adr	r3,sha512_block_data_order
+	adr	r3,.Lsha512_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 	ldr	r12,.LOPENSSL_armcap
@@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
 .global	sha512_block_data_order
 .type	sha512_block_data_order,%function
 sha512_block_data_order:
+.Lsha512_block_data_order:
 #if __ARM_ARCH__<7
 	sub	r3,pc,#8		@ sha512_block_data_order
 #else
-	adr	r3,sha512_block_data_order
+	adr	r3,.Lsha512_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 	ldr	r12,.LOPENSSL_armcap
@@ -11,6 +11,8 @@
 #define sev()	__asm__ __volatile__ ("sev" : : : "memory")
 #define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
 #define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
+#else
+#define wfe()	do { } while (0)
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
@@ -25,7 +25,6 @@
 #ifndef __ASSEMBLY__
 struct irqaction;
 struct pt_regs;
-extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
@@ -48,6 +48,7 @@
 #define KVM_REQ_SLEEP \
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
 typedef struct kvm_cpu_context kvm_cpu_context_t;
 
+struct vcpu_reset_state {
+	unsigned long	pc;
+	unsigned long	r0;
+	bool		be;
+	bool		reset;
+};
+
 struct kvm_vcpu_arch {
 	struct kvm_cpu_context ctxt;
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
+	struct vcpu_reset_state reset_state;
+
 	/* Detect first run of a vcpu */
 	bool has_run_once;
 };
@@ -95,7 +95,11 @@ extern void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
 #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax()			smp_mb()
+#define cpu_relax()						\
+	do {							\
+		smp_mb();					\
+		__asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;");	\
+	} while (0)
 #else
 #define cpu_relax()			barrier()
 #endif
@@ -49,7 +49,7 @@
  * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
  */
 #define EXC_RET_STACK_MASK			0x00000004
-#define EXC_RET_THREADMODE_PROCESSSTACK	0xfffffffd
+#define EXC_RET_THREADMODE_PROCESSSTACK	(3 << 2)
 
 /* Cache related definitions */
@@ -127,7 +127,8 @@
  */
 	.macro	v7m_exception_slow_exit ret_r0
 	cpsid	i
-	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK
+	ldr	lr, =exc_ret
+	ldr	lr, [lr]
 
 	@ read original r12, sp, lr, pc and xPSR
 	add	r12, sp, #S_IP
@@ -146,3 +146,7 @@ ENTRY(vector_table)
 	.rept	CONFIG_CPU_V7M_NUM_IRQ
 	.long	__irq_entry		@ External Interrupts
 	.endr
+	.align	2
+	.globl	exc_ret
+exc_ret:
+	.space	4
@@ -31,7 +31,6 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
 	return nr_irqs;
 }
 #endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-	struct irq_chip *c;
-	bool ret = false;
-
-	/*
-	 * If this is a per-CPU interrupt, or the affinity does not
-	 * include this CPU, then we have nothing to do.
-	 */
-	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-		return false;
-
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-		affinity = cpu_online_mask;
-		ret = true;
-	}
-
-	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity)
-		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-	return ret;
-}
-
-/*
- * The current CPU has been marked offline.  Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-	unsigned int i;
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	for_each_irq_desc(i, desc) {
-		bool affinity_broken;
-
-		raw_spin_lock(&desc->lock);
-		affinity_broken = migrate_one_irq(desc);
-		raw_spin_unlock(&desc->lock);
-
-		if (affinity_broken)
-			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-				i, smp_processor_id());
-	}
-
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
@@ -91,8 +91,11 @@ void machine_crash_nonpanic_core(void *unused)
 
 	set_cpu_online(smp_processor_id(), false);
 	atomic_dec(&waiting_for_crash_ipi);
-	while (1)
+
+	while (1) {
 		cpu_relax();
+		wfe();
+	}
 }
 
 void crash_smp_send_stop(void)
@@ -16,7 +16,7 @@ struct patch {
 	unsigned int insn;
 };
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
 	__acquires(&patch_lock)
@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
 		return addr;
 
 	if (flags)
-		spin_lock_irqsave(&patch_lock, *flags);
+		raw_spin_lock_irqsave(&patch_lock, *flags);
 	else
 		__acquire(&patch_lock);
@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
 	clear_fixmap(fixmap);
 
 	if (flags)
-		spin_unlock_irqrestore(&patch_lock, *flags);
+		raw_spin_unlock_irqrestore(&patch_lock, *flags);
 	else
 		__release(&patch_lock);
 }
@@ -254,7 +254,7 @@ int __cpu_disable(void)
 	/*
 	 * OK - migrate IRQs away from this CPU
 	 */
-	migrate_irqs();
+	irq_migrate_all_off_this_cpu();
 
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
@@ -604,8 +604,10 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
-	while (1)
+	while (1) {
 		cpu_relax();
+		wfe();
+	}
 }
 
 static DEFINE_PER_CPU(struct completion *, cpu_completion);
@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
 static const struct unwind_idx *__origin_unwind_idx;
 extern const struct unwind_idx __stop_unwind_idx[];
 
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
 
 /* Convert a prel31 symbol to an absolute address */
@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 		/* module unwind tables */
 		struct unwind_table *table;
 
-		spin_lock_irqsave(&unwind_lock, flags);
+		raw_spin_lock_irqsave(&unwind_lock, flags);
 		list_for_each_entry(table, &unwind_tables, list) {
 			if (addr >= table->begin_addr &&
 			    addr < table->end_addr) {
@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 				break;
 			}
 		}
-		spin_unlock_irqrestore(&unwind_lock, flags);
+		raw_spin_unlock_irqrestore(&unwind_lock, flags);
 	}
 
 	pr_debug("%s: idx = %p\n", __func__, idx);
@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
 	tab->begin_addr = text_addr;
 	tab->end_addr = text_addr + text_size;
 
-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
 	list_add_tail(&tab->list, &unwind_tables);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
 	return tab;
 }
@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
 	if (!tab)
 		return;
 
-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
 	list_del(&tab->list);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
 	kfree(tab);
 }
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 	reset_coproc_regs(vcpu, table, num);
 
 	for (num = 1; num < NR_CP15_REGS; num++)
-		if (vcpu_cp15(vcpu, num) == 0x42424242)
-			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
+		WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+		     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
@@ -26,6 +26,7 @@
 #include <asm/cputype.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 
 #include <kvm/arm_arch_timer.h>
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset CP15 registers */
 	kvm_reset_coprocs(vcpu);
 
+	/*
+	 * Additional reset state handling that PSCI may have imposed on us.
+	 * Must be done after all the sys_reg reset.
+	 */
+	if (READ_ONCE(vcpu->arch.reset_state.reset)) {
+		unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+		/* Gracefully handle Thumb2 entry point */
+		if (target_pc & 1) {
+			target_pc &= ~1UL;
+			vcpu_set_thumb(vcpu);
+		}
+
+		/* Propagate caller endianness */
+		if (vcpu->arch.reset_state.be)
+			kvm_vcpu_set_be(vcpu);
+
+		*vcpu_pc(vcpu) = target_pc;
+		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+		vcpu->arch.reset_state.reset = false;
+	}
+
 	/* Reset arch_timer context */
 	return kvm_timer_vcpu_reset(vcpu);
 }
@@ -39,7 +39,7 @@ $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
 $(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
 
 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
-  NEON_FLAGS			:= -mfloat-abi=softfp -mfpu=neon
+  NEON_FLAGS			:= -march=armv7-a -mfloat-abi=softfp -mfpu=neon
   CFLAGS_xor-neon.o		+= $(NEON_FLAGS)
   obj-$(CONFIG_XOR_BLOCKS)	+= xor-neon.o
 endif