Commit 086e5551 authored by Lorenzo "Palinuro" Faletra

Import Upstream version 5.4.8

parent d2ab784c
@@ -134,7 +134,7 @@ enum {
 	GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C,
 	GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A,
 	GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B,
-	GPIO_FN_RD_WR, GPIO_FN_TCLK0,
+	GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4,
 	GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B,
 	GPIO_FN_ET0_ETXD3_A,
 	GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B,
......
@@ -83,7 +83,7 @@ static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
 	return 0;
 }
 
-static int full_read(int fd, void *buf, int len)
+static int full_read(int fd, void *buf, int len, bool abortable)
 {
 	int rc;
@@ -93,7 +93,7 @@ static int full_read(int fd, void *buf, int len)
 			buf += rc;
 			len -= rc;
 		}
-	} while (len && (rc > 0 || rc == -EINTR));
+	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
 
 	if (rc < 0)
 		return rc;
@@ -104,7 +104,7 @@ static int full_read(int fd, void *buf, int len)
 static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
 {
-	return full_read(fd, msg, sizeof(msg->header));
+	return full_read(fd, msg, sizeof(msg->header), true);
 }
 
 static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
@@ -118,7 +118,7 @@ static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
 	size = msg->header.size;
 	if (size > max_payload_size)
 		return -EPROTO;
-	return full_read(fd, &msg->payload, size);
+	return full_read(fd, &msg->payload, size, false);
 }
 
 static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
......
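Note: taken together, the hunks above give full_read() an "abortable" flag. With abortable=false the read loop also retries -EAGAIN, so a payload read on the nonblocking vhost-user socket can no longer bail out halfway through a message; header reads pass true and stay abortable. A minimal sketch of the resulting helper, reassembled from the hunks — the os_read_file() call and the exact tail error handling are assumptions, not quoted source:

static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	do {
		/* assumed UML host-side read helper */
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR ||
			 (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;		/* hard error from the host read */
	return len ? -ECONNRESET : 0;	/* assumed: EOF mid-message */
}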
@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_CRASH_H
 #define _ASM_X86_CRASH_H
 
+struct kimage;
+
 int crash_load_segments(struct kimage *image);
 int crash_copy_backup_region(struct kimage *image);
 int crash_setup_memmap_entries(struct kimage *image,
......
@@ -156,7 +156,7 @@ extern pte_t *kmap_pte;
 extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
-void native_set_fixmap(enum fixed_addresses idx,
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
 		       phys_addr_t phys, pgprot_t flags);
 
 #ifndef CONFIG_PARAVIRT_XXL
......
@@ -48,12 +48,13 @@
  * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
  * named __ia32_sys_*()
  */
-#define SYSCALL_DEFINE0(sname)					\
-	SYSCALL_METADATA(_##sname, 0);				\
-	asmlinkage long __x64_sys_##sname(void);		\
-	ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO);	\
-	SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname);	\
-	asmlinkage long __x64_sys_##sname(void)
+#define SYSCALL_DEFINE0(sname)						\
+	SYSCALL_METADATA(_##sname, 0);					\
+	asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
+	ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO);		\
+	SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname);		\
+	asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
 
 #define COND_SYSCALL(name)						\
 	cond_syscall(__x64_sys_##name);					\
@@ -181,11 +182,11 @@
  * macros to work correctly.
  */
 #ifndef SYSCALL_DEFINE0
-#define SYSCALL_DEFINE0(sname)					\
-	SYSCALL_METADATA(_##sname, 0);				\
-	asmlinkage long __x64_sys_##sname(void);		\
-	ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO);	\
-	asmlinkage long __x64_sys_##sname(void)
+#define SYSCALL_DEFINE0(sname)						\
+	SYSCALL_METADATA(_##sname, 0);					\
+	asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
+	ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO);		\
+	asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
 #endif
 
 #ifndef COND_SYSCALL
......
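Note: both hunks change only the prototype of zero-argument syscalls so that, like every other SYSCALL_DEFINEx stub on x86, they take const struct pt_regs * and all syscall-table entries share one function type. A hypothetical expansion of SYSCALL_DEFINE0(getpid) under the new x86-64 macro:

/* Hypothetical expansion; derived mechanically from the macro body above. */
SYSCALL_METADATA(_getpid, 0);
asmlinkage long __x64_sys_getpid(const struct pt_regs *__unused);
ALLOW_ERROR_INJECTION(__x64_sys_getpid, ERRNO);
SYSCALL_ALIAS(__ia32_sys_getpid, __x64_sys_getpid);
asmlinkage long __x64_sys_getpid(const struct pt_regs *__unused)
{
	/* function body as written at the definition site */
}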
@@ -1727,9 +1727,10 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
 
 static inline bool ioapic_irqd_mask(struct irq_data *data)
 {
-	/* If we are moving the irq we need to mask it */
+	/* If we are moving the IRQ we need to mask it */
 	if (unlikely(irqd_is_setaffinity_pending(data))) {
-		mask_ioapic_irq(data);
+		if (!irqd_irq_masked(data))
+			mask_ioapic_irq(data);
 		return true;
 	}
 	return false;
@@ -1766,7 +1767,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
 		 */
 		if (!io_apic_level_ack_pending(data->chip_data))
 			irq_move_masked_irq(data);
-		unmask_ioapic_irq(data);
+		/* If the IRQ is masked in the core, leave it: */
+		if (!irqd_irq_masked(data))
+			unmask_ioapic_irq(data);
 	}
 }
 #else
......
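Note: these two helpers bracket the level-triggered ack path, and the new irqd_irq_masked() checks keep the hardware mask bit and the core's bookkeeping consistent when the IRQ was already masked (e.g. via disable_irq()). A condensed sketch of the pairing, modeled on ioapic_ack_level() in the same file — the caller name and the elided steps are assumptions:

static void ioapic_ack_level(struct irq_data *irq_data)
{
	bool masked;

	/* masks only if the core has not masked it already */
	masked = ioapic_irqd_mask(irq_data);

	/* ... ack the vector, apply EOI errata workarounds ... */

	/* unmasks only if we masked it and the core agrees */
	ioapic_irqd_unmask(irq_data, masked);
}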
@@ -266,10 +266,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
 	smca_set_misc_banks_map(bank, cpu);
 
 	/* Return early if this bank was already initialized. */
-	if (smca_banks[bank].hwid)
+	if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0)
 		return;
 
-	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
+	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
 		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
 		return;
 	}
......
@@ -814,8 +814,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 		if (quirk_no_way_out)
 			quirk_no_way_out(i, m, regs);
 
-		m->bank = i;
 		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+			m->bank = i;
 			mce_read_aux(m, i);
 			*msg = tmp;
 			return 1;
@@ -188,7 +188,7 @@ static void therm_throt_process(bool new_event, int event, int level)
 	/* if we just entered the thermal event */
 	if (new_event) {
 		if (event == THERMAL_THROTTLING_EVENT)
-			pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+			pr_warn("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package",
 				state->count);
......
@@ -710,6 +710,8 @@ static struct chipset early_qrk[] __initdata = {
 	 */
 	{ PCI_VENDOR_ID_INTEL, 0x0f00,
 		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+	{ PCI_VENDOR_ID_INTEL, 0x3e20,
+		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
 	{ PCI_VENDOR_ID_INTEL, 0x3ec4,
 		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
 	{ PCI_VENDOR_ID_BROADCOM, 0x4331,
......
@@ -402,7 +402,8 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
 			entry->edx |= F(SPEC_CTRL);
 		if (boot_cpu_has(X86_FEATURE_STIBP))
 			entry->edx |= F(INTEL_STIBP);
-		if (boot_cpu_has(X86_FEATURE_SSBD))
+		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+		    boot_cpu_has(X86_FEATURE_AMD_SSBD))
 			entry->edx |= F(SPEC_CTRL_SSBD);
 		/*
 		 * We emulate ARCH_CAPABILITIES in software even
@@ -759,7 +760,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
 			entry->ebx |= F(AMD_IBRS);
 		if (boot_cpu_has(X86_FEATURE_STIBP))
 			entry->ebx |= F(AMD_STIBP);
-		if (boot_cpu_has(X86_FEATURE_SSBD))
+		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+		    boot_cpu_has(X86_FEATURE_AMD_SSBD))
 			entry->ebx |= F(AMD_SSBD);
 		if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
 			entry->ebx |= F(AMD_SSB_NO);
......
@@ -333,7 +333,7 @@ AVXcode: 1
 06: CLTS
 07: SYSRET (o64)
 08: INVD
-09: WBINVD
+09: WBINVD | WBNOINVD (F3)
 0a:
 0b: UD2 (1B)
 0c:
@@ -364,7 +364,7 @@ AVXcode: 1
 # a ModR/M byte.
 1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
 1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
+1c: Grp20 (1A),(1C)
 1d:
 1e:
 1f: NOP Ev
@@ -792,6 +792,8 @@ f3: Grp17 (1A)
 f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
 f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
 f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
+f9: MOVDIRI My,Gy
 EndTable
 
 Table: 3-byte opcode 2 (0x0f 0x3a)
@@ -943,9 +945,9 @@ GrpTable: Grp6
 EndTable
 
 GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
 3: LIDT Ms
 4: SMSW Mw/Rv
 5: rdpkru (110),(11B) | wrpkru (111),(11B)
@@ -1020,7 +1022,7 @@ GrpTable: Grp15
 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
 4: XSAVE | ptwrite Ey (F3),(11B)
 5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
 7: clflush | clflushopt (66) | sfence (11B)
 EndTable
@@ -1051,6 +1053,10 @@ GrpTable: Grp19
 6: vscatterpf1qps/d Wx (66),(ev)
 EndTable
 
+GrpTable: Grp20
+0: cldemote Mb
+EndTable
+
 # AMD's Prefetch Group
 GrpTable: GrpP
 0: PREFETCH
......
@@ -107,6 +107,8 @@ static inline bool seg_writable(struct desc_struct *d)
 #define FPU_access_ok(y,z)	if ( !access_ok(y,z) ) \
 				math_abort(FPU_info,SIGSEGV)
 #define FPU_abort		math_abort(FPU_info, SIGSEGV)
+#define FPU_copy_from_user(to, from, n)	\
+		do { if (copy_from_user(to, from, n)) FPU_abort; } while (0)
 
 #undef FPU_IGNORE_CODE_SEGV
 #ifdef FPU_IGNORE_CODE_SEGV
@@ -122,7 +124,7 @@ static inline bool seg_writable(struct desc_struct *d)
 #define	FPU_code_access_ok(z) FPU_access_ok((void __user *)FPU_EIP,z)
 #endif
 
-#define FPU_get_user(x,y)	get_user((x),(y))
-#define FPU_put_user(x,y)	put_user((x),(y))
+#define FPU_get_user(x,y)	do { if (get_user((x),(y))) FPU_abort; } while (0)
+#define FPU_put_user(x,y)	do { if (put_user((x),(y))) FPU_abort; } while (0)
 
 #endif
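Note: with these macros the FPU emulator's user accesses are no longer fire-and-forget; a faulting user pointer aborts emulation with SIGSEGV instead of silently continuing with a bogus value. Per the definitions above, a call such as FPU_get_user(x, y) now expands to roughly:

do {
	if (get_user((x), (y)))			/* fails with -EFAULT */
		math_abort(FPU_info, SIGSEGV);	/* this is FPU_abort */
} while (0)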
@@ -85,7 +85,7 @@ int FPU_load_extended(long double __user *s, int stnr)
 
 	RE_ENTRANT_CHECK_OFF;
 	FPU_access_ok(s, 10);
-	__copy_from_user(sti_ptr, s, 10);
+	FPU_copy_from_user(sti_ptr, s, 10);
 	RE_ENTRANT_CHECK_ON;
 
 	return FPU_tagof(sti_ptr);
@@ -1126,9 +1126,9 @@ void frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
 	/* Copy all registers in stack order. */
 	RE_ENTRANT_CHECK_OFF;
 	FPU_access_ok(s, 80);
-	__copy_from_user(register_base + offset, s, other);
+	FPU_copy_from_user(register_base + offset, s, other);
 	if (offset)
-		__copy_from_user(register_base, s + other, offset);
+		FPU_copy_from_user(register_base, s + other, offset);
 	RE_ENTRANT_CHECK_ON;
 
 	for (i = 0; i < 8; i++) {
......
@@ -643,8 +643,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
 	fixmaps_set++;
 }
 
-void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
-		       pgprot_t flags)
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
+		       phys_addr_t phys, pgprot_t flags)
 {
 	/* Sanitize 'prot' against any unsupported bits: */
 	pgprot_val(flags) &= __default_kernel_pte_mask;
......
@@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 {
 	struct ioc *ioc = iocg->ioc;
 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
@@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 	/* clear or maintain depending on the overage */
 	if (time_before_eq64(vtime, now->vnow)) {
 		blkcg_clear_delay(blkg);
-		return;
+		return false;
 	}
 	if (!atomic_read(&blkg->use_delay) &&
 	    time_before_eq64(vtime, now->vnow + vmargin))
-		return;
+		return false;
 
 	/* use delay */
 	if (cost) {
@@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
 	if (hrtimer_is_queued(&iocg->delay_timer) &&
 	    abs(oexpires - expires) <= margin_ns / 4)
-		return;
+		return true;
 
 	hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
 			       margin_ns / 4, HRTIMER_MODE_ABS);
+	return true;
 }
 
 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
@@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 	 */
 	if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
 		atomic64_add(abs_cost, &iocg->abs_vdebt);
-		iocg_kick_delay(iocg, &now, cost);
+		if (iocg_kick_delay(iocg, &now, cost))
+			blkcg_schedule_throttle(rqos->q,
+					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		return;
 	}
......
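Note: iocg_kick_delay() now reports whether a throttling delay is in effect, and the one caller that piles debt onto abs_vdebt uses that to put the issuing task to sleep as well. A sketch of the contract as read from the hunks above (a summary, not quoted source):

/*
 * false - vtime is not ahead of vnow (any delay was cleared), or it
 *         is within the margin and no use_delay is currently set;
 * true  - a delay applies: the delay_timer was already queued close
 *         enough to the wanted expiry, or it has just been (re)armed.
 */
if (iocg_kick_delay(iocg, &now, cost))
	blkcg_schedule_throttle(rqos->q,
				(bio->bi_opf & REQ_SWAP) == REQ_SWAP);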
@@ -309,6 +309,7 @@ config CRYPTO_AEGIS128
 config CRYPTO_AEGIS128_SIMD
 	bool "Support SIMD acceleration for AEGIS-128"
 	depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
+	depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800
 	default y
 
 config CRYPTO_AEGIS128_AESNI_SSE2
......
@@ -93,7 +93,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o
 aegis128-y := aegis128-core.o
 
 ifeq ($(ARCH),arm)
-CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp
+CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv8-a -mfloat-abi=softfp
 CFLAGS_aegis128-neon-inner.o += -mfpu=crypto-neon-fp-armv8
 aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o
 endif
......
@@ -486,6 +486,7 @@ static int tpm_key_encrypt(struct tpm_key *tk,
 	if (ret < 0)
 		goto error_free_tfm;
 
+	ret = -ENOMEM;
 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req)
 		goto error_free_tfm;
......
@@ -184,6 +184,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
 	if (IS_ERR(tfm))
 		return PTR_ERR(tfm);
 
+	ret = -ENOMEM;
 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req)
 		goto error_free_tfm;
......
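Note: the last two hunks fix the same error-path bug in the TPM-backed and software asymmetric-key code: when akcipher_request_alloc() failed, ret had never been set to an error, so the goto returned a stale value instead of a failure code. Presetting ret makes the allocation failure visible; the corrected pattern in isolation:

	ret = -ENOMEM;
	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto error_free_tfm;	/* now returns -ENOMEM, not a stale ret */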