packages / kernel / linux · Commits

Commit cc02f499, authored Feb 01, 2020 by Lorenzo "Palinuro" Faletra

Import Upstream version 5.4.13

Parent: 086e5551
Changes: 693
arch/arm64/mm/fault.c

@@ -454,7 +454,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
 	vm_fault_t fault, major = 0;
-	unsigned long vm_flags = VM_READ | VM_WRITE;
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (kprobe_page_fault(regs, esr))
...
arch/arm64/mm/mmu.c

@@ -1069,7 +1069,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;
 
 	/*
 	 * FIXME: Cleanup page tables (also in arch_add_memory() in case
...
@@ -1078,7 +1077,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 	 * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
 	 * unlocked yet.
 	 */
-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
arch/hexagon/include/asm/atomic.h

@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
 	"1:	%0 = memw_locked(%1);\n"		\
 	"	%0 = "#op "(%0,%2);\n"			\
 	"	memw_locked(%1,P3)=%0;\n"		\
-	"	if !P3 jump 1b;\n"			\
+	"	if (!P3) jump 1b;\n"			\
 	: "=&r" (output)				\
 	: "r" (&v->counter), "r" (i)			\
 	: "memory", "p3"				\
...
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	"1:	%0 = memw_locked(%1);\n"		\
 	"	%0 = "#op "(%0,%2);\n"			\
 	"	memw_locked(%1,P3)=%0;\n"		\
-	"	if !P3 jump 1b;\n"			\
+	"	if (!P3) jump 1b;\n"			\
 	: "=&r" (output)				\
 	: "r" (&v->counter), "r" (i)			\
 	: "memory", "p3"				\
...
@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
 	"1:	%0 = memw_locked(%2);\n"		\
 	"	%1 = "#op "(%0,%3);\n"			\
 	"	memw_locked(%2,P3)=%1;\n"		\
-	"	if !P3 jump 1b;\n"			\
+	"	if (!P3) jump 1b;\n"			\
 	: "=&r" (output), "=&r" (val)			\
 	: "r" (&v->counter), "r" (i)			\
 	: "memory", "p3"				\
...
@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 	"	}"
 	"	memw_locked(%2, p3) = %1;"
 	"	{"
-	"		if !p3 jump 1b;"
+	"		if (!p3) jump 1b;"
 	"	}"
 	"2:"
 	: "=&r" (__oldval), "=&r" (tmp)
...
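Editorial note: the memw_locked/predicate sequences patched above are Hexagon load-locked/store-conditional retry loops. A minimal sketch of the same retry shape in portable C11 (illustration only, not code from this commit):

/* Illustration only -- not part of the patch. Load the current value, compute
 * the update, attempt the store, and loop back (the "if (!P3) jump 1b" above)
 * until the store-conditional equivalent succeeds. */
#include <stdatomic.h>

static int fetch_add_retry(atomic_int *v, int i)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* compare-exchange plays the role of memw_locked(...,P3) = ... */
	while (!atomic_compare_exchange_weak(v, &old, old + i))
		;	/* on failure 'old' is refreshed automatically; retry */
	return old;
}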
arch/hexagon/include/asm/bitops.h

@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"
...
@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"
...
@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"
...
@@ -223,7 +223,7 @@ static inline int ffs(int x)
 	int r;
 
 	asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
-	    "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+	    "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
	    : "=&r" (r)
	    : "r" (x)
	    : "p0");
...
arch/hexagon/include/asm/cmpxchg.h

@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 	__asm__ __volatile__ (
 	"1:	%0 = memw_locked(%1);\n"    /* load into retval */
 	"	memw_locked(%1,P0) = %2;\n" /* store into memory */
-	"	if !P0 jump 1b;\n"
+	"	if (!P0) jump 1b;\n"
 	: "=&r" (retval)
 	: "r" (ptr), "r" (x)
 	: "memory", "p0"
...
arch/hexagon/include/asm/futex.h

@@ -16,7 +16,7 @@
 	/* For example: %1 = %4 */			\
	insn						\
 	"2: memw_locked(%3,p2) = %1;\n"			\
-	"	if !p2 jump 1b;\n"			\
+	"	if (!p2) jump 1b;\n"			\
 	"	%1 = #0;\n"				\
 	"3:\n"						\
 	".section .fixup,\"ax\"\n"			\
...
@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
 	"1:	%1 = memw_locked(%3)\n"
 	"	{\n"
 	"	p2 = cmp.eq(%1,%4)\n"
-	"	if !p2.new jump:NT 3f\n"
+	"	if (!p2.new) jump:NT 3f\n"
 	"	}\n"
 	"2:	memw_locked(%3,p2) = %5\n"
-	"	if !p2 jump 1b\n"
+	"	if (!p2) jump 1b\n"
 	"3:\n"
 	".section .fixup,\"ax\"\n"
 	"4:	%0 = #%6\n"
...
arch/hexagon/include/asm/spinlock.h

@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0);\n"
 	"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
...
@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
 	"1:	R6 = memw_locked(%0);\n"
 	"	R6 = add(R6,#-1);\n"
 	"	memw_locked(%0,P3) = R6\n"
-	"	if !P3 jump 1b;\n"
+	"	if (!P3) jump 1b;\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
...
@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1);\n"
 	"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-	"	{ if !P3 jump 1f; }\n"
+	"	{ if (!P3) jump 1f; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	{ %0 = P3 }\n"
 	"1:\n"
...
@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0)\n"
 	"	{ P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
...
@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1)\n"
 	"	{ %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
-	"	{ if !P3 jump 1f; }\n"
+	"	{ if (!P3) jump 1f; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	%0 = P3;\n"
 	"1:\n"
...
@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0);\n"
 	"	P3 = cmp.eq(R6,#0);\n"
-	"	{ if !P3 jump 1b; R6 = #1; }\n"
+	"	{ if (!P3) jump 1b; R6 = #1; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"
...
@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1);\n"
 	"	P3 = cmp.eq(R6,#0);\n"
-	"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+	"	{ if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	%0 = P3;\n"
 	"1:\n"
...
arch/hexagon/kernel/stacktrace.c

@@ -11,8 +11,6 @@
 #include <linux/thread_info.h>
 #include <linux/module.h>
 
-register unsigned long current_frame_pointer asm("r30");
-
 struct stackframe {
 	unsigned long fp;
 	unsigned long rets;
...
@@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace)
 	low = (unsigned long)task_stack_page(current);
 	high = low + THREAD_SIZE;
-	fp = current_frame_pointer;
+	fp = (unsigned long)__builtin_frame_address(0);
 
 	while (fp >= low && fp <= (high - sizeof(*frame))) {
 		frame = (struct stackframe *)fp;
...
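Editorial note: the stacktrace.c hunk replaces a fixed global register variable with a compiler builtin. As a point of reference (my illustration, not from the commit), __builtin_frame_address(0) is a standard GCC/Clang builtin that returns the current function's frame address and can be exercised in ordinary userspace code:

/* Illustration only: __builtin_frame_address(0) yields the frame address of
 * the calling function, which is what the patched save_stack_trace() now
 * starts its walk from instead of reading r30 directly. */
#include <stdio.h>

int main(void)
{
	unsigned long fp = (unsigned long)__builtin_frame_address(0);

	printf("current frame pointer: %#lx\n", fp);
	return 0;
}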
arch/hexagon/kernel/vm_entry.S

@@ -369,7 +369,7 @@ ret_from_fork:
 		R26.L = #LO(do_work_pending);
 		R0 = #VM_INT_DISABLE;
 	}
-	if P0 jump check_work_pending
+	if (P0) jump check_work_pending
 	{
 		R0 = R25;
 		callr R24
...
arch/ia64/mm/init.c

@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;
 
-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
arch/mips/Kconfig

@@ -46,7 +46,7 @@ config MIPS
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
 	select HAVE_ASM_MODVERSIONS
-	select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
+	select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
...
arch/mips/boot/compressed/Makefile

@@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
 	-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
 	-DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
 
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+
 # decompressor objects (linked with vmlinuz)
 vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
...
arch/mips/include/asm/thread_info.h

@@ -49,8 +49,26 @@ struct thread_info {
 	.addr_limit	= KERNEL_DS,		\
 }
 
-/* How to get the thread information struct from C. */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
 register struct thread_info *__current_thread_info __asm__("$28");
+#endif
 
 static inline struct thread_info *current_thread_info(void)
 {
...
arch/mips/include/asm/vdso/gettimeofday.h

@@ -26,8 +26,6 @@
 
 #define __VDSO_USE_SYSCALL		ULLONG_MAX
 
-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
-
 static __always_inline long gettimeofday_fallback(
				struct __kernel_old_timeval *_tv,
				struct timezone *_tz)
...
@@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback(
 	return error ? -ret : ret;
 }
 
-#else
-
-static __always_inline long gettimeofday_fallback(
-				struct __kernel_old_timeval *_tv,
-				struct timezone *_tz)
-{
-	return -1;
-}
-
-#endif
-
 static __always_inline long clock_gettime_fallback(
					clockid_t _clkid,
					struct __kernel_timespec *_ts)
...
arch/mips/kernel/cacheinfo.c

@@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
 	return 0;
 }
 
+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
+{
+	int cpu1;
+
+	for_each_possible_cpu(cpu1)
+		if (cpus_are_siblings(cpu, cpu1))
+			cpumask_set_cpu(cpu1, cpu_map);
+}
+
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
+{
+	int cpu1;
+	int cluster = cpu_cluster(&cpu_data[cpu]);
+
+	for_each_possible_cpu(cpu1)
+		if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+			cpumask_set_cpu(cpu1, cpu_map);
+}
+
 static int __populate_cache_leaves(unsigned int cpu)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
...
@@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
 	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
 	if (c->icache.waysize) {
+		/* L1 caches are per core */
+		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
 	} else {
 		populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
 	}
 
-	if (c->scache.waysize)
+	if (c->scache.waysize) {
+		/* L2 cache is per cluster */
+		fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+	}
 
 	if (c->tcache.waysize)
 		populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
...
arch/mips/net/ebpf_jit.c

@@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
 static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 {
 	int off, b_off;
+	int tcc_reg;
 
 	ctx->flags |= EBPF_SEEN_TC;
 	/*
...
@@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 	b_off = b_imm(this_idx + 1, ctx);
 	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
 	/*
-	 * if (--TCC < 0)
+	 * if (TCC-- < 0)
 	 *     goto out;
 	 */
-	/* Delay slot */
-	emit_instr(ctx, daddiu, MIPS_R_T5,
-		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+	tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
+	/* Delay slot */
+	emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
 	b_off = b_imm(this_idx + 1, ctx);
-	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+	emit_instr(ctx, bltz, tcc_reg, b_off);
 	/*
 	 * prog = array->ptrs[index];
 	 * if (prog == NULL)
...
@@ -1803,7 +1804,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	unsigned int image_size;
 	u8 *image_ptr;
 
-	if (!prog->jit_requested || MIPS_ISA_REV < 2)
+	if (!prog->jit_requested)
 		return prog;
 
 	tmp = bpf_jit_blind_constants(prog);
...
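Editorial note: the comment fix above records that the emitted check behaves like a post-decrement test of the tail-call count. A small self-contained illustration (mine, not from the commit) of why "TCC-- < 0" permits one more iteration than "--TCC < 0":

/* Illustration only: the off-by-one difference the comment change documents. */
#include <stdio.h>

static int calls_allowed(int budget, int post_decrement)
{
	int tcc = budget, n = 0;

	for (;;) {
		int stop = post_decrement ? (tcc-- < 0) : (--tcc < 0);

		if (stop)
			break;
		n++;
	}
	return n;
}

int main(void)
{
	printf("--tcc < 0 : %d calls\n", calls_allowed(1, 0)); /* prints 1 */
	printf("tcc-- < 0 : %d calls\n", calls_allowed(1, 1)); /* prints 2 */
	return 0;
}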
arch/mips/pci/pci-xtalk-bridge.c

@@ -279,16 +279,15 @@ static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask,
 	struct bridge_irq_chip_data *data = d->chip_data;
 	int bit = d->parent_data->hwirq;
 	int pin = d->hwirq;
-	nasid_t nasid;
 	int ret, cpu;
 
 	ret = irq_chip_set_affinity_parent(d, mask, force);
 	if (ret >= 0) {
 		cpu = cpumask_first_and(mask, cpu_online_mask);
-		nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+		data->nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
 		bridge_write(data->bc, b_int_addr[pin].addr,
			     (((data->bc->intr_addr >> 30) & 0x30000) |
-			      bit | (nasid << 8)));
+			      bit | (data->nasid << 8)));
 		bridge_read(data->bc, b_wid_tflush);
 	}
 	return ret;
...
arch/mips/sgi-ip27/ip27-irq.c

@@ -73,6 +73,9 @@ static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
 	int cpu;
 
 	cpu = cpumask_first_and(mask, cpu_online_mask);
+	if (cpu >= nr_cpu_ids)
+		cpu = cpumask_any(cpu_online_mask);
+
 	nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
 	hd->cpu = cpu;
 	if (!cputoslice(cpu)) {
...
@@ -139,6 +142,7 @@ static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	/* use CPU connected to nearest hub */
 	hub = hub_data(NASID_TO_COMPACT_NODEID(info->nasid));
 	setup_hub_mask(hd, &hub->h_cpus);
+	info->nasid = cpu_to_node(hd->cpu);
 
 	/* Make sure it's not already pending when we connect it. */
 	REMOTE_HUB_CLR_INTR(info->nasid, swlevel);
...
arch/mips/vdso/vgettimeofday.c

@@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock,
 	return __cvdso_clock_gettime32(clock, ts);
 }
 
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
			struct timezone *tz)
 {
 	return __cvdso_gettimeofday(tv, tz);
 }
 
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
 int __vdso_clock_getres(clockid_t clock_id,
			struct old_timespec32 *res)
 {
...
@@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock,
 	return __cvdso_clock_gettime(clock, ts);
 }
 
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
			struct timezone *tz)
 {
 	return __cvdso_gettimeofday(tv, tz);
 }
 
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
 int __vdso_clock_getres(clockid_t clock_id,
			struct __kernel_timespec *res)
 {
...
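Editorial note: the comment added above assumes callers fall back to the syscall when the vDSO does not export __vdso_gettimeofday. A hedged userspace sketch of that behaviour (illustration only; a real libc resolves vDSO symbols through the AT_SYSINFO_EHDR auxiliary vector rather than dlsym):

/* Illustration only. If the vDSO symbol is absent (e.g. the vsyscall clock is
 * not available), fall back to the plain syscall. Link with -ldl on older glibc. */
#include <dlfcn.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>

typedef int (*gtod_fn)(struct timeval *, struct timezone *);

static int gettimeofday_with_fallback(struct timeval *tv, struct timezone *tz)
{
	gtod_fn vdso_gtod = (gtod_fn)dlsym(RTLD_DEFAULT, "__vdso_gettimeofday");

	if (vdso_gtod)
		return vdso_gtod(tv, tz);		/* fast path, no kernel entry */
	return syscall(SYS_gettimeofday, tv, tz);	/* symbol missing: syscall */
}

int main(void)
{
	struct timeval tv;

	gettimeofday_with_fallback(&tv, NULL);
	printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}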
arch/nds32/include/asm/cacheflush.h

@@ -9,7 +9,11 @@
 #define PG_dcache_dirty PG_arch_1
 
 void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
+
 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+#define flush_icache_page flush_icache_page
+
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
...
@@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size);
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 
 #else
-#include <asm-generic/cacheflush.h>
-#undef flush_icache_range
-#undef flush_icache_page
-#undef flush_icache_user_range
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
	                     unsigned long addr, int len);
+#define flush_icache_user_range flush_icache_user_range
+
+#include <asm-generic/cacheflush.h>
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
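Editorial note: the nds32 hunks rely on the asm-generic override idiom: declare the arch implementation, then define a macro with the same name before including <asm-generic/cacheflush.h>, so the generic header (which guards each fallback with #ifndef) does not redefine it. A stripped-down sketch of the idiom (mine, not from the commit):

/* Illustration only: how "#define foo foo" lets an arch header suppress the
 * generic fallback that a later asm-generic include would otherwise provide. */

/* arch header: provide the real prototype and claim the name */
void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_range flush_icache_range

/* generic header (simplified): only supply a no-op if the arch didn't */
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end) { }
#endif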