Commit cc02f499 authored by Lorenzo "Palinuro" Faletra

Import Upstream version 5.4.13

parent 086e5551
// SPDX-License-Identifier: GPL-2.0
#define __HAVE_ARCH_MEMCMP /* arch function */
#include "../lib/string.c"
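/*
 * Note on the new file above (illustrative, not part of the diff):
 * defining __HAVE_ARCH_MEMCMP before pulling in lib/string.c compiles
 * out the generic memcmp() in favour of the architecture's own, since
 * the generic implementation is guarded roughly like this:
 *
 *	#ifndef __HAVE_ARCH_MEMCMP
 *	__visible int memcmp(const void *cs, const void *ct, size_t count)
 *	{ ... }
 *	#endif
 */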
@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
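/*
 * Context for this and the later arch_remove_memory() hunks
 * (editor's sketch, not part of the diff): __remove_pages() dropped its
 * struct zone * parameter and now derives the zone from the pages
 * themselves, so every caller loses the page_zone() lookup:
 *
 *	old: void __remove_pages(struct zone *zone, unsigned long pfn,
 *				 unsigned long nr_pages,
 *				 struct vmem_altmap *altmap);
 *	new: void __remove_pages(unsigned long pfn, unsigned long nr_pages,
 *				 struct vmem_altmap *altmap);
 */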
@@ -14,6 +14,7 @@ config UML
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_BUGVERBOSE
select HAVE_COPY_THREAD_TLS
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
......
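/*
 * What selecting HAVE_COPY_THREAD_TLS buys (paraphrase of the generic
 * glue in include/linux/sched/task.h of this era; a sketch, not part of
 * the diff): the core fork path hands the clone TLS argument to the
 * architecture explicitly instead of the arch digging it out of the
 * child's register set.
 */
#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			   struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
		       struct task_struct *);
/* Legacy arches: the TLS pointer is still implicit in the registers. */
static inline int copy_thread_tls(unsigned long clone_flags,
		unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif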
@@ -36,7 +36,7 @@ extern long subarch_ptrace(struct task_struct *child, long request,
extern unsigned long getreg(struct task_struct *child, int regno);
extern int putreg(struct task_struct *child, int regno, unsigned long value);
extern int arch_copy_tls(struct task_struct *new);
extern int arch_set_tls(struct task_struct *new, unsigned long tls);
extern void clear_flushed_tls(struct task_struct *task);
extern int syscall_trace_enter(struct pt_regs *regs);
extern void syscall_trace_leave(struct pt_regs *regs);
......
@@ -153,8 +153,8 @@ void fork_handler(void)
userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct * p)
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct * p, unsigned long tls)
{
void (*handler)(void);
int kthread = current->flags & PF_KTHREAD;
@@ -188,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS)
ret = arch_copy_tls(p);
ret = arch_set_tls(p, tls);
}
return ret;
......
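/*
 * Caller's view of the tls parameter above (illustrative; x86-64
 * argument order, hypothetical variable names): it is simply the
 * CLONE_SETTLS argument of clone(2).
 *
 *	syscall(SYS_clone, flags | CLONE_SETTLS, child_stack,
 *		&parent_tid, &child_tid, tls_value);
 */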
@@ -10,13 +10,11 @@
#ifdef CONFIG_IA32_EMULATION
/* On X86_64, we use struct pt_regs * to pass parameters to syscalls */
#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
/* this is a lie, but it does not hurt as sys_ni_syscall just returns -ENOSYS */
extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
#define __sys_ni_syscall __ia32_sys_ni_syscall
#else /* CONFIG_IA32_EMULATION */
#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#define __sys_ni_syscall sys_ni_syscall
#endif /* CONFIG_IA32_EMULATION */
#include <asm/syscalls_32.h>
@@ -29,6 +27,6 @@ __visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] =
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_syscall_compat_max] = &sys_ni_syscall,
[0 ... __NR_syscall_compat_max] = &__sys_ni_syscall,
#include <asm/syscalls_32.h>
};
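/*
 * Why the per-config alias matters (editor's note): with the wrapper
 * scheme the table entries have the pt_regs-based type
 *
 *	typedef asmlinkage long (*sys_call_ptr_t)(const struct pt_regs *);
 *
 * so the not-implemented slot needs a stub of that exact prototype when
 * IA32 emulation is on, and the plain six-argument variant otherwise;
 * __sys_ni_syscall resolves to the right one in each configuration.
 */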
@@ -4,11 +4,17 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
#include <asm/asm-offsets.h>
#include <asm/syscall.h>
/* this is a lie, but it does not hurt as sys_ni_syscall just returns -ENOSYS */
extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
extern asmlinkage long sys_ni_syscall(void);
SYSCALL_DEFINE0(ni_syscall)
{
return sys_ni_syscall();
}
#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
#define __SYSCALL_X32(nr, sym, qual) __SYSCALL_64(nr, sym, qual)
#include <asm/syscalls_64.h>
@@ -23,7 +29,7 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_syscall_max] = &sys_ni_syscall,
[0 ... __NR_syscall_max] = &__x64_sys_ni_syscall,
#include <asm/syscalls_64.h>
};
@@ -40,7 +46,7 @@ asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_syscall_x32_max+1] = {
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_syscall_x32_max] = &sys_ni_syscall,
[0 ... __NR_syscall_x32_max] = &__x64_sys_ni_syscall,
#include <asm/syscalls_64.h>
};
......
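/*
 * Approximate expansion of the SYSCALL_DEFINE0(ni_syscall) added above
 * under the x86 syscall-wrapper scheme (editor's sketch; the exact
 * macros appear in the asm/syscall_wrapper.h hunks further down):
 *
 *	asmlinkage long __x64_sys_ni_syscall(const struct pt_regs *__unused)
 *	{
 *		return sys_ni_syscall();	// always -ENOSYS
 *	}
 *	// plus an __ia32_sys_ni_syscall alias for the 32-bit table
 */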
@@ -124,13 +124,13 @@
110 i386 iopl sys_iopl __ia32_sys_iopl
111 i386 vhangup sys_vhangup __ia32_sys_vhangup
112 i386 idle
113 i386 vm86old sys_vm86old sys_ni_syscall
113 i386 vm86old sys_vm86old __ia32_sys_ni_syscall
114 i386 wait4 sys_wait4 __ia32_compat_sys_wait4
115 i386 swapoff sys_swapoff __ia32_sys_swapoff
116 i386 sysinfo sys_sysinfo __ia32_compat_sys_sysinfo
117 i386 ipc sys_ipc __ia32_compat_sys_ipc
118 i386 fsync sys_fsync __ia32_sys_fsync
119 i386 sigreturn sys_sigreturn sys32_sigreturn
119 i386 sigreturn sys_sigreturn __ia32_compat_sys_sigreturn
120 i386 clone sys_clone __ia32_compat_sys_x86_clone
121 i386 setdomainname sys_setdomainname __ia32_sys_setdomainname
122 i386 uname sys_newuname __ia32_sys_newuname
@@ -177,14 +177,14 @@
163 i386 mremap sys_mremap __ia32_sys_mremap
164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16
165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16
166 i386 vm86 sys_vm86 sys_ni_syscall
166 i386 vm86 sys_vm86 __ia32_sys_ni_syscall
167 i386 query_module
168 i386 poll sys_poll __ia32_sys_poll
169 i386 nfsservctl
170 i386 setresgid sys_setresgid16 __ia32_sys_setresgid16
171 i386 getresgid sys_getresgid16 __ia32_sys_getresgid16
172 i386 prctl sys_prctl __ia32_sys_prctl
173 i386 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
173 i386 rt_sigreturn sys_rt_sigreturn __ia32_compat_sys_rt_sigreturn
174 i386 rt_sigaction sys_rt_sigaction __ia32_compat_sys_rt_sigaction
175 i386 rt_sigprocmask sys_rt_sigprocmask __ia32_compat_sys_rt_sigprocmask
176 i386 rt_sigpending sys_rt_sigpending __ia32_compat_sys_rt_sigpending
......
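# Column layout for the two syscall-table hunks above, as documented at
# the top of arch/x86/entry/syscalls/syscall_32.tbl:
#	<number> <abi> <name> <entry point> <compat entry point>
# The last column is what a 64-bit kernel's ia32 emulation dispatches
# to, which is why the raw sys32_*sigreturn entries give way to the
# COMPAT_SYSCALL_DEFINE0-generated __ia32_compat_sys_* stubs.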
@@ -375,7 +375,7 @@ int x86_add_exclusive(unsigned int what)
* LBR and BTS are still mutually exclusive.
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return 0;
goto out;
if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
mutex_lock(&pmc_reserve_mutex);
@@ -387,6 +387,7 @@ int x86_add_exclusive(unsigned int what)
mutex_unlock(&pmc_reserve_mutex);
}
out:
atomic_inc(&active_events);
return 0;
@@ -397,11 +398,15 @@ int x86_add_exclusive(unsigned int what)
void x86_del_exclusive(unsigned int what)
{
atomic_dec(&active_events);
/*
* See the comment in x86_add_exclusive().
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return;
atomic_dec(&x86_pmu.lbr_exclusive[what]);
atomic_dec(&active_events);
}
int x86_setup_perfctr(struct perf_event *event)
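/*
 * The invariant the two perf hunks above restore (generic sketch with
 * hypothetical names, not kernel code): a get/put counter must move
 * exactly once per call on every path, including early exits.
 */
static atomic_t cnt;

static int take(bool fast_path)
{
	if (fast_path)
		goto out;	/* still count this caller */
	/* ... slow path ... */
out:
	atomic_inc(&cnt);
	return 0;
}

static void release(bool fast_path)
{
	atomic_dec(&cnt);	/* mirror the inc before any early return */
	if (fast_path)
		return;
	/* ... slow path ... */
}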
@@ -1641,9 +1646,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = {
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr = \
struct perf_pmu_events_attr *pmu_attr =
container_of(attr, struct perf_pmu_events_attr, attr);
u64 config = x86_pmu.event_map(pmu_attr->id);
u64 config = 0;
if (pmu_attr->id < x86_pmu.max_events)
config = x86_pmu.event_map(pmu_attr->id);
/* string trumps id */
if (pmu_attr->event_str)
@@ -1712,6 +1720,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct perf_pmu_events_attr *pmu_attr;
if (idx >= x86_pmu.max_events)
return 0;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
/* str trumps id */
return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
......
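/*
 * Both sysfs hunks above apply the same guard (editor's note): an event
 * id carried by a perf_pmu_events_attr can exceed the CPU's event_map()
 * table, so it is bounds-checked against x86_pmu.max_events first, and
 * out-of-range attributes are hidden (mode 0) instead of dereferenced.
 */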
@@ -63,9 +63,17 @@ struct bts_buffer {
static struct pmu bts_pmu;
static int buf_nr_pages(struct page *page)
{
if (!PagePrivate(page))
return 1;
return 1 << page_private(page);
}
static size_t buf_size(struct page *page)
{
return 1 << (PAGE_SHIFT + page_private(page));
return buf_nr_pages(page) * PAGE_SIZE;
}
static void *
@@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
/* count all the high order buffers */
for (pg = 0, nbuf = 0; pg < nr_pages;) {
page = virt_to_page(pages[pg]);
if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
return NULL;
pg += 1 << page_private(page);
pg += buf_nr_pages(page);
nbuf++;
}
@@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
unsigned int __nr_pages;
page = virt_to_page(pages[pg]);
__nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
__nr_pages = buf_nr_pages(page);
buf->buf[nbuf].page = page;
buf->buf[nbuf].offset = offset;
buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
......
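/*
 * Worked example for the helpers above (assuming 4 KiB pages): a
 * high-order AUX page of order 2 is PagePrivate with
 * page_private(page) == 2, so
 *
 *	buf_nr_pages(page) == 1 << 2        ==  4 pages
 *	buf_size(page)     == 4 * PAGE_SIZE == 16 KiB
 *
 * while an ordinary order-0 page (not PagePrivate) now counts as one
 * page instead of tripping the removed WARN_ON_ONCE() path.
 */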
@@ -21,6 +21,7 @@
#include <linux/personality.h>
#include <linux/compat.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h>
@@ -118,7 +119,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
return err;
}
asmlinkage long sys32_sigreturn(void)
COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = current_pt_regs();
struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
@@ -144,7 +145,7 @@ asmlinkage long sys32_sigreturn(void)
return 0;
}
asmlinkage long sys32_rt_sigreturn(void)
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe_ia32 __user *frame;
......
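/*
 * Rough expansion of COMPAT_SYSCALL_DEFINE0(sigreturn) with the wrapper
 * macros introduced in the syscall_wrapper.h hunks below (editor's
 * sketch):
 *
 *	asmlinkage long __ia32_compat_sys_sigreturn(const struct pt_regs *regs)
 *	{
 *		return __se_compat_sys_sigreturn();
 *	}
 *	static long __se_compat_sys_sigreturn(void)
 *	{
 *		return __do_compat_sys_sigreturn();
 *	}
 *	static inline long __do_compat_sys_sigreturn(void)
 *	{ ... the function body written above ... }
 *
 * which gives the entry the pt_regs-based prototype the syscall tables
 * now expect.
 */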
@@ -6,6 +6,8 @@
#ifndef _ASM_X86_SYSCALL_WRAPPER_H
#define _ASM_X86_SYSCALL_WRAPPER_H
struct pt_regs;
/* Mapping of registers to parameters for syscalls on x86-64 and x32 */
#define SC_X86_64_REGS_TO_ARGS(x, ...) \
__MAP(x,__SC_ARGS \
@@ -28,13 +30,21 @@
* kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
* case as well.
*/
#define __IA32_COMPAT_SYS_STUB0(x, name) \
asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs);\
ALLOW_ERROR_INJECTION(__ia32_compat_sys_##name, ERRNO); \
asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs)\
{ \
return __se_compat_sys_##name(); \
}
#define __IA32_COMPAT_SYS_STUBx(x, name, ...) \
asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs);\
ALLOW_ERROR_INJECTION(__ia32_compat_sys##name, ERRNO); \
asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs)\
{ \
return __se_compat_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\
} \
}
#define __IA32_SYS_STUBx(x, name, ...) \
asmlinkage long __ia32_sys##name(const struct pt_regs *regs); \
@@ -56,9 +66,15 @@
SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
#define COND_SYSCALL(name) \
cond_syscall(__x64_sys_##name); \
cond_syscall(__ia32_sys_##name)
#define COND_SYSCALL(name) \
asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \
{ \
return sys_ni_syscall(); \
} \
asmlinkage __weak long __ia32_sys_##name(const struct pt_regs *__unused)\
{ \
return sys_ni_syscall(); \
}
#define SYS_NI(name) \
SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers); \
@@ -76,15 +92,24 @@
* of the x86-64-style parameter ordering of x32 syscalls. The syscalls common
* with x86_64 obviously do not need such care.
*/
#define __X32_COMPAT_SYS_STUB0(x, name, ...) \
asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs);\
ALLOW_ERROR_INJECTION(__x32_compat_sys_##name, ERRNO); \
asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs)\
{ \
return __se_compat_sys_##name();\
}
#define __X32_COMPAT_SYS_STUBx(x, name, ...) \
asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs);\
ALLOW_ERROR_INJECTION(__x32_compat_sys##name, ERRNO); \
asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs)\
{ \
return __se_compat_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\
} \
}
#else /* CONFIG_X86_X32 */
#define __X32_COMPAT_SYS_STUB0(x, name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
#endif /* CONFIG_X86_X32 */
@@ -95,6 +120,17 @@
* mapping of registers to parameters, we need to generate stubs for each
* of them.
*/
#define COMPAT_SYSCALL_DEFINE0(name) \
static long __se_compat_sys_##name(void); \
static inline long __do_compat_sys_##name(void); \
__IA32_COMPAT_SYS_STUB0(x, name) \
__X32_COMPAT_SYS_STUB0(x, name) \
static long __se_compat_sys_##name(void) \
{ \
return __do_compat_sys_##name(); \
} \
static inline long __do_compat_sys_##name(void)
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
@@ -190,7 +226,11 @@
#endif
#ifndef COND_SYSCALL
#define COND_SYSCALL(name) cond_syscall(__x64_sys_##name)
#define COND_SYSCALL(name) \
asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \
{ \
return sys_ni_syscall(); \
}
#endif
#ifndef SYS_NI
@@ -202,7 +242,6 @@
* For VSYSCALLS, we need to declare these three syscalls with the new
* pt_regs-based calling convention for in-kernel use.
*/
struct pt_regs;
asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs);
asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs);
asmlinkage long __x64_sys_time(const struct pt_regs *regs);
......
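/*
 * Net effect of the COND_SYSCALL rework above (sketch; "foo" is a
 * hypothetical optional syscall): instead of a cond_syscall() alias,
 * the macro emits a weak, correctly typed default that any real
 * SYSCALL_DEFINEx(foo, ...) definition overrides at link time:
 *
 *	COND_SYSCALL(foo);
 *	// expands to roughly:
 *	asmlinkage __weak long __x64_sys_foo(const struct pt_regs *__unused)
 *	{
 *		return sys_ni_syscall();
 *	}
 */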
@@ -714,6 +714,8 @@ static struct chipset early_qrk[] __initdata = {
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_INTEL, 0x3ec4,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_INTEL, 0x8a12,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_BROADCOM, 0x4331,
PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}
......
@@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
......
@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
struct zone *zone = page_zone(page);
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
......
@@ -260,10 +260,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
return;
}
/* No need to reserve regions that will never be freed. */
if (md.attribute & EFI_MEMORY_RUNTIME)
return;
size += addr % EFI_PAGE_SIZE;
size = round_up(size, EFI_PAGE_SIZE);
addr = round_down(addr, EFI_PAGE_SIZE);
@@ -293,6 +289,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
early_memunmap(new, new_size);
efi_memmap_install(new_phys, num_entries);
e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
e820__update_table(e820_table);
}
/*
......
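/*
 * Note (hedged): e820__range_update() only edits entries in place; the
 * added e820__update_table() call re-sorts and merges the table, so
 * later consumers of e820_table, kexec in particular, actually see the
 * EFI boot-services region flagged as reserved.
 */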
@@ -215,14 +215,12 @@ static int set_tls_entry(struct task_struct* task, struct user_desc *info,
return 0;
}
int arch_copy_tls(struct task_struct *new)
int arch_set_tls(struct task_struct *new, unsigned long tls)
{
struct user_desc info;
int idx, ret = -EFAULT;
if (copy_from_user(&info,
(void __user *) UPT_SI(&new->thread.regs.regs),
sizeof(info)))
if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
goto out;
ret = -EINVAL;
......
@@ -6,14 +6,13 @@ void clear_flushed_tls(struct task_struct *task)
{
}
int arch_copy_tls(struct task_struct *t)
int arch_set_tls(struct task_struct *t, unsigned long tls)
{
/*
* If CLONE_SETTLS is set, we need to save the thread id
* (which is argument 5, child_tid, of clone) so it can be set
* during context switches.
* so it can be set during context switches.
*/
t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
t->thread.arch.fs = tls;
return 0;
}
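/*
 * Before this change the x86-64 UML code fished the TLS value out of
 * the child's R8 register slot, i.e. clone()'s register argument; with
 * arch_set_tls() the explicit tls parameter lands directly in
 * thread.arch.fs, the FS base UML restores on context switch
 * (illustrative summary; the 32-bit variant above is the
 * user_desc-based counterpart).
 */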
@@ -22,6 +22,7 @@ config XTENSA
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if MMU
select HAVE_ARCH_TRACEHOOK
select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
......
@@ -202,8 +202,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
* involved. Much simpler to just not copy those live frames across.
*/
int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
unsigned long thread_fn_arg, struct task_struct *p)
int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn,
unsigned long thread_fn_arg, struct task_struct *p,
unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
@@ -264,9 +265,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
&regs->areg[XCHAL_NUM_AREGS - len/4], len);
}
/* The thread pointer is passed in the '4th argument' (= a5) */
if (clone_flags & CLONE_SETTLS)
childregs->threadptr = childregs->areg[5];
childregs->threadptr = tls;
} else {
p->thread.ra = MAKE_RA_FOR_CALL(
(unsigned long)ret_from_kernel_thread, 1);
......
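/*
 * Same conversion on xtensa (editor's note): the thread pointer used to
 * be read back out of the child's areg[5], where the clone() TLS
 * argument happened to land; taking tls as an explicit parameter keeps
 * this working even when live register frames are not copied across,
 * which is what the surrounding comments are about.
 */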