packages / kernel / linux / Commits

Commit f4157cb5
authored Oct 26, 2021 by Lorenzo "Palinuro" Faletra

Import Upstream version 5.14.9

parent 49e4b437
Pipeline #4416 failed
Changes: 445
Documentation/devicetree/bindings/arm/tegra.yaml

@@ -54,7 +54,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30
Documentation/devicetree/bindings/mtd/gpmc-nand.txt

@@ -122,7 +122,7 @@ on various other factors also like;
 	so the device should have enough free bytes available its OOB/Spare
 	area to accommodate ECC for entire page. In general following expression
 	helps in determining if given device can accommodate ECC syndrome:
-	"2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
+	"2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
 	where
 		OOBSIZE		number of bytes in OOB/spare area
 		PAGESIZE	number of bytes in main-area of device page
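The corrected operator turns the expression into a fit check: the ECC bytes needed for a whole page, plus two spare bytes, must not exceed the OOB size. As a rough illustration (the page size, ECC bytes per step, and OOB size below are assumed example values, not taken from this binding), a 2048-byte page with a scheme needing 13 ECC bytes per 512-byte step requires 2 + (2048 / 512) * 13 = 54 bytes, which fits a 64-byte OOB area:

    /*
     * Sketch of the corrected OOB fit check from gpmc-nand.txt.
     * The sample values (2048-byte page, 13 ECC bytes per 512-byte step,
     * 64-byte OOB) are assumptions for illustration only.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool ecc_fits_oob(unsigned int pagesize, unsigned int ecc_bytes,
                             unsigned int oobsize)
    {
            /* "2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE */
            return 2 + (pagesize / 512) * ecc_bytes <= oobsize;
    }

    int main(void)
    {
            /* 2 + (2048 / 512) * 13 = 54 <= 64, so the layout fits */
            printf("fits: %s\n", ecc_fits_oob(2048, 13, 64) ? "yes" : "no");
            return 0;
    }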
Documentation/driver-api/cxl/memory-devices.rst

@@ -36,7 +36,7 @@ CXL Core
 .. kernel-doc:: drivers/cxl/cxl.h
    :internal:

-.. kernel-doc:: drivers/cxl/core.c
+.. kernel-doc:: drivers/cxl/core/bus.c
    :doc: cxl core

 External Interfaces
Makefile

 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 14
-SUBLEVEL = 6
+SUBLEVEL = 9
 EXTRAVERSION =
 NAME = Opossums on Parade
arch/alpha/include/asm/io.h

@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
  * Change virtual addresses to physical addresses and vv.
  */
 #ifdef USE_48_BIT_KSEG
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
 	return (unsigned long)address - IDENT_ADDR;
 }

@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
 	return (void *) (address + IDENT_ADDR);
 }
 #else
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
 	unsigned long phys = (unsigned long)address;

@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
 extern unsigned long __direct_map_base;
 extern unsigned long __direct_map_size;

-static inline unsigned long __deprecated virt_to_bus(void *address)
+static inline unsigned long __deprecated virt_to_bus(volatile void *address)
 {
 	unsigned long phys = virt_to_phys(address);
 	unsigned long bus = phys + __direct_map_base;
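The alpha change above only widens the parameter type: a volatile void * parameter accepts both volatile-qualified and plain pointers, whereas passing a pointer to volatile into a plain void * parameter discards the qualifier and draws a compiler warning. A minimal sketch of that C rule, using made-up names rather than the kernel's helpers:

    /*
     * Why `volatile void *` is the more permissive prototype: both calls
     * below compile cleanly. With a plain `void *` parameter, the second
     * call would warn about discarding the volatile qualifier.
     * The function name here is hypothetical, not a kernel API.
     */
    #include <stdio.h>

    static unsigned long fake_virt_to_phys(volatile void *address)
    {
            /* address arithmetic does not care about the qualifier */
            return (unsigned long)address;
    }

    int main(void)
    {
            int plain = 0;
            volatile int mmio_like = 0;

            printf("%lx %lx\n", fake_virt_to_phys(&plain),
                   fake_virt_to_phys(&mmio_like));
            return 0;
    }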
arch/arc/mm/cache.c

@@ -1123,7 +1123,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
 	clear_page(to);
 	clear_bit(PG_dc_clean, &page->flags);
 }
+EXPORT_SYMBOL(clear_user_page);

 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
arch/arm64/include/asm/assembler.h

@@ -525,6 +525,11 @@ alternative_endif
 #define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
 #endif

+#ifdef CONFIG_KASAN_HW_TAGS
+#define EXPORT_SYMBOL_NOHWKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOHWKASAN(name)	EXPORT_SYMBOL_NOKASAN(name)
+#endif
+
 /*
  * Emit a 64-bit absolute little endian symbol reference in a way that
  * ensures that it will be resolved at build time, even when building a
arch/arm64/include/asm/mte.h

@@ -105,11 +105,17 @@ void mte_check_tfsr_el1(void);

 static inline void mte_check_tfsr_entry(void)
 {
+	if (!system_supports_mte())
+		return;
+
 	mte_check_tfsr_el1();
 }

 static inline void mte_check_tfsr_exit(void)
 {
+	if (!system_supports_mte())
+		return;
+
 	/*
 	 * The asynchronous faults are sync'ed automatically with
 	 * TFSR_EL1 on kernel entry but for exit an explicit dsb()
arch/arm64/include/asm/string.h

@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
 #define __HAVE_ARCH_STRCHR
 extern char *strchr(const char *, int c);

+#ifndef CONFIG_KASAN_HW_TAGS
 #define __HAVE_ARCH_STRCMP
 extern int strcmp(const char *, const char *);

 #define __HAVE_ARCH_STRNCMP
 extern int strncmp(const char *, const char *, __kernel_size_t);
+#endif

 #define __HAVE_ARCH_STRLEN
 extern __kernel_size_t strlen(const char *);
arch/arm64/kernel/cacheinfo.c

@@ -43,7 +43,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
 	this_leaf->type = type;
 }

-static int __init_cache_level(unsigned int cpu)
+int init_cache_level(unsigned int cpu)
 {
 	unsigned int ctype, level, leaves, fw_level;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

@@ -78,7 +78,7 @@ static int __init_cache_level(unsigned int cpu)
 	return 0;
 }

-static int __populate_cache_leaves(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
 {
 	unsigned int level, idx;
 	enum cache_type type;

@@ -97,6 +97,3 @@ static int __populate_cache_leaves(unsigned int cpu)
 	}
 	return 0;
 }
-
-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
arch/arm64/kernel/cpufeature.c

@@ -1500,9 +1500,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	/*
 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
 	 * ThunderX leads to apparent I-cache corruption of kernel text, which
-	 * ends as well as you might imagine. Don't even try.
+	 * ends as well as you might imagine. Don't even try. We cannot rely
+	 * on the cpus_have_*cap() helpers here to detect the CPU erratum
+	 * because cpucap detection order may change. However, since we know
+	 * affected CPUs are always in a homogeneous configuration, it is
+	 * safe to rely on this_cpu_has_cap() here.
 	 */
-	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+	if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
 		str = "ARM64_WORKAROUND_CAVIUM_27456";
 		__kpti_forced = -1;
 	}
arch/arm64/kernel/fpsimd.c

@@ -511,7 +511,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
 	if (task->thread.sve_state) {
-		memset(task->thread.sve_state, 0, sve_state_size(current));
+		memset(task->thread.sve_state, 0, sve_state_size(task));
 		return;
 	}
arch/arm64/kernel/mte.c

@@ -173,12 +173,7 @@ bool mte_report_once(void)
 #ifdef CONFIG_KASAN_HW_TAGS
 void mte_check_tfsr_el1(void)
 {
-	u64 tfsr_el1;
-
-	if (!system_supports_mte())
-		return;
-
-	tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+	u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

 	if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
 		/*

@@ -221,6 +216,9 @@ void mte_thread_init_user(void)
 void mte_thread_switch(struct task_struct *next)
 {
+	if (!system_supports_mte())
+		return;
+
 	/*
 	 * Check if an async tag exception occurred at EL1.
 	 *
arch/arm64/kernel/process.c

@@ -60,7 +60,7 @@
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
arch/arm64/kvm/arm.c

@@ -1220,6 +1220,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (copy_from_user(&reg, argp, sizeof(reg)))
 			break;

+		/*
+		 * We could owe a reset due to PSCI. Handle the pending reset
+		 * here to ensure userspace register accesses are ordered after
+		 * the reset.
+		 */
+		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+			kvm_reset_vcpu(vcpu);
+
 		if (ioctl == KVM_SET_ONE_REG)
 			r = kvm_arm_set_reg(vcpu, &reg);
 		else
arch/arm64/kvm/handle_exit.c

@@ -292,11 +292,12 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }

-void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
+void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
+					      u64 elr_virt, u64 elr_phys,
 					      u64 par, uintptr_t vcpu,
 					      u64 far, u64 hpfar) {
-	u64 elr_in_kimg = __phys_to_kimg(__hyp_pa(elr));
-	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr;
+	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
+	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
 	u64 mode = spsr & PSR_MODE_MASK;

 	/*

@@ -309,20 +310,24 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
 		kvm_err("Invalid host exception to nVHE hyp!\n");
 	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
 		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
-		struct bug_entry *bug = find_bug(elr_in_kimg);
 		const char *file = NULL;
 		unsigned int line = 0;

 		/* All hyp bugs, including warnings, are treated as fatal. */
-		if (bug)
-			bug_get_file_line(bug, &file, &line);
+		if (!is_protected_kvm_enabled() ||
+		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
+			struct bug_entry *bug = find_bug(elr_in_kimg);
+
+			if (bug)
+				bug_get_file_line(bug, &file, &line);
+		}

 		if (file)
 			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
 		else
-			kvm_err("nVHE hyp BUG at: %016llx!\n", elr + hyp_offset);
+			kvm_err("nVHE hyp BUG at: %016llx!\n", elr_virt + hyp_offset);
 	} else {
-		kvm_err("nVHE hyp panic at: %016llx!\n", elr + hyp_offset);
+		kvm_err("nVHE hyp panic at: %016llx!\n", elr_virt + hyp_offset);
 	}

 	/*

@@ -334,5 +339,5 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
 	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

 	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
-	      spsr, elr, esr, far, hpfar, par, vcpu);
+	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
 }
arch/arm64/kvm/hyp/nvhe/host.S

@@ -7,6 +7,7 @@
 #include <linux/linkage.h>

 #include <asm/assembler.h>
+#include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>

@@ -85,12 +86,24 @@ SYM_FUNC_START(__hyp_do_panic)
 	mov	x29, x0

+#ifdef CONFIG_NVHE_EL2_DEBUG
+	/* Ensure host stage-2 is disabled */
+	mrs	x0, hcr_el2
+	bic	x0, x0, #HCR_VM
+	msr	hcr_el2, x0
+	isb
+	tlbi	vmalls12e1
+	dsb	nsh
+#endif
+
 	/* Load the panic arguments into x0-7 */
 	mrs	x0, esr_el2
-	get_vcpu_ptr x4, x5
-	mrs	x5, far_el2
-	mrs	x6, hpfar_el2
-	mov	x7, xzr			// Unused argument
+	mov	x4, x3
+	mov	x3, x2
+	hyp_pa	x3, x6
+	get_vcpu_ptr x5, x6
+	mrs	x6, far_el2
+	mrs	x7, hpfar_el2

 	/* Enter the host, conditionally restoring the host context. */
 	cbz	x29, __host_enter_without_restoring
arch/arm64/kvm/reset.c

@@ -210,10 +210,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_reset_state reset_state;
 	int ret;
 	bool loaded;
 	u32 pstate;

+	mutex_lock(&vcpu->kvm->lock);
+	reset_state = vcpu->arch.reset_state;
+	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+	mutex_unlock(&vcpu->kvm->lock);
+
 	/* Reset PMU outside of the non-preemptible section */
 	kvm_pmu_vcpu_reset(vcpu);

@@ -276,8 +282,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	 * Additional reset state handling that PSCI may have imposed on us.
 	 * Must be done after all the sys_reg reset.
 	 */
-	if (vcpu->arch.reset_state.reset) {
-		unsigned long target_pc = vcpu->arch.reset_state.pc;
+	if (reset_state.reset) {
+		unsigned long target_pc = reset_state.pc;

 		/* Gracefully handle Thumb2 entry point */
 		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {

@@ -286,13 +292,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		}

 		/* Propagate caller endianness */
-		if (vcpu->arch.reset_state.be)
+		if (reset_state.be)
 			kvm_vcpu_set_be(vcpu);

 		*vcpu_pc(vcpu) = target_pc;
-		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
-
-		vcpu->arch.reset_state.reset = false;
+		vcpu_set_reg(vcpu, 0, reset_state.r0);
 	}

 	/* Reset timer */

@@ -317,6 +321,14 @@ int kvm_set_ipa_limit(void)
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	parange = cpuid_feature_extract_unsigned_field(mmfr0,
 				ID_AA64MMFR0_PARANGE_SHIFT);
+	/*
+	 * IPA size beyond 48 bits could not be supported
+	 * on either 4K or 16K page size. Hence let's cap
+	 * it to 48 bits, in case it's reported as larger
+	 * on the system.
+	 */
+	if (PAGE_SIZE != SZ_64K)
+		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);

 	/*
 	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
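The kvm_reset_vcpu() change above follows a common concurrency pattern: while holding the lock, snapshot the shared request state into a local and clear the pending flag, then act only on the private copy, so a concurrent writer (here, the PSCI path that re-arms reset_state) can never be observed half-updated. A generic sketch of that pattern using pthreads and made-up names, not KVM's types:

    /*
     * "Snapshot under lock, act on the copy" pattern, as in the
     * kvm_reset_vcpu() hunk above. All names here are hypothetical.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct reset_state {
            unsigned long pc;
            unsigned long r0;
            bool be;
            bool pending;
    };

    static struct reset_state shared_state = { .pc = 0x80000, .pending = true };
    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    static void handle_reset(void)
    {
            struct reset_state snap;

            /* Copy the shared state and consume the request while the
             * lock is held, then release it before doing the work. */
            pthread_mutex_lock(&state_lock);
            snap = shared_state;
            shared_state.pending = false;
            pthread_mutex_unlock(&state_lock);

            /* From here on, only the private snapshot is used. */
            if (snap.pending)
                    printf("resetting: pc=%#lx r0=%#lx be=%d\n",
                           snap.pc, snap.r0, snap.be);
    }

    int main(void)
    {
            handle_reset();
            return 0;
    }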
arch/arm64/lib/strcmp.S

@@ -173,4 +173,4 @@ L(done):
 	ret

 SYM_FUNC_END_PI(strcmp)
-EXPORT_SYMBOL_NOKASAN(strcmp)
+EXPORT_SYMBOL_NOHWKASAN(strcmp)
arch/arm64/lib/strncmp.S

@@ -258,4 +258,4 @@ L(ret0):
 	ret

 SYM_FUNC_END_PI(strncmp)
-EXPORT_SYMBOL_NOKASAN(strncmp)
+EXPORT_SYMBOL_NOHWKASAN(strncmp)