packages / kernel / linux

Commit 34cfdd4d
Authored Oct 07, 2018 by Lorenzo "Palinuro" Faletra

Import Upstream version 4.18.10

parent 441d7f18
Changes: 705 files
arch/x86/kernel/tsc.c
@@ -1343,7 +1343,7 @@ device_initcall(init_tsc_clocksource);
 void __init tsc_early_delay_calibrate(void)
 {
-	unsigned long lpj;
+	u64 lpj;
 
 	if (!boot_cpu_has(X86_FEATURE_TSC))
 		return;
@@ -1355,7 +1355,7 @@ void __init tsc_early_delay_calibrate(void)
 	if (!tsc_khz)
 		return;
 
-	lpj = tsc_khz * 1000;
+	lpj = (u64)tsc_khz * 1000;
 	do_div(lpj, HZ);
 	loops_per_jiffy = lpj;
 }
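Aside on the tsc.c hunks above (an illustration, not part of the commit): on 32-bit x86 builds unsigned long is 32 bits wide, so tsc_khz * 1000 can wrap before do_div() ever runs, which is what the switch to u64 and the (u64) cast avoid. A minimal user-space sketch of that overflow, using an assumed TSC frequency of 4300000 kHz:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example frequency: a 4.3 GHz TSC, i.e. 4300000 kHz. */
	uint32_t tsc_khz = 4300000;

	/* 32-bit product wraps: 4300000 * 1000 exceeds UINT32_MAX. */
	uint32_t lpj32 = tsc_khz * 1000;

	/* Widening one operand first, as the patch does, keeps the value. */
	uint64_t lpj64 = (uint64_t)tsc_khz * 1000;

	printf("32-bit product: %u\n", lpj32);
	printf("64-bit product: %llu\n", (unsigned long long)lpj64);
	return 0;
}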
arch/x86/kvm/mmu.c
@@ -221,6 +221,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
 						    PT64_EPT_EXECUTABLE_MASK;
 static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
 
+/*
+ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
+ * to guard against L1TF attacks.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
+
+/*
+ * The number of high-order 1 bits to use in the mask above.
+ */
+static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
@@ -308,9 +319,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 {
 	unsigned int gen = kvm_current_mmio_generation(vcpu);
 	u64 mask = generation_mmio_spte_mask(gen);
+	u64 gpa = gfn << PAGE_SHIFT;
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
-	mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
+	mask |= shadow_mmio_value | access;
+	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
+	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
+		<< shadow_nonpresent_or_rsvd_mask_len;
 
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
@@ -323,8 +338,14 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
-	return (spte & ~mask) >> PAGE_SHIFT;
+	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
+		   shadow_nonpresent_or_rsvd_mask;
+	u64 gpa = spte & ~mask;
+
+	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+	       & shadow_nonpresent_or_rsvd_mask;
+
+	return gpa >> PAGE_SHIFT;
 }
 
 static unsigned get_mmio_spte_access(u64 spte)
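As an aside on the two mmu.c hunks above: when shadow_nonpresent_or_rsvd_mask is active, mark_mmio_spte() stores the GPA bits that the mask would clobber a second time, shifted up by shadow_nonpresent_or_rsvd_mask_len, and get_mmio_spte_gfn() shifts them back down. The stand-alone sketch below reproduces only that round trip with assumed example values (46 physical address bits, 5 mask bits); it deliberately ignores the generation and shadow_mmio_mask bits that the kernel folds into the same word.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define MASK_LEN   5ULL                /* shadow_nonpresent_or_rsvd_mask_len */
#define PHYS_BITS  46ULL               /* assumed example CPU */
#define RSVD_MASK  (((1ULL << MASK_LEN) - 1) << (PHYS_BITS - MASK_LEN))

/* Roughly what mark_mmio_spte() does with the GPA portion of the SPTE. */
static uint64_t encode(uint64_t gpa)
{
	uint64_t spte = gpa | RSVD_MASK;        /* force the guard bits to 1 */

	spte |= (gpa & RSVD_MASK) << MASK_LEN;  /* keep a copy of the clobbered bits */
	return spte;
}

/* Roughly what get_mmio_spte_gfn() does: drop the guard bits and their
 * shifted copy, then restore the original GPA bits from that copy. */
static uint64_t decode(uint64_t spte)
{
	uint64_t gpa = spte & ~(RSVD_MASK | (RSVD_MASK << MASK_LEN));

	gpa |= (spte >> MASK_LEN) & RSVD_MASK;
	return gpa >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t gfn = 0x123456789ULL;          /* arbitrary example gfn */

	assert(decode(encode(gfn << PAGE_SHIFT)) == gfn);
	printf("gfn %#llx survives the round trip\n", (unsigned long long)gfn);
	return 0;
}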
@@ -381,7 +402,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static void kvm_mmu_clear_all_pte_masks(void)
+static void kvm_mmu_reset_all_pte_masks(void)
 {
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
@@ -391,6 +412,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
 	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;
+
+	/*
+	 * If the CPU has 46 or less physical address bits, then set an
+	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
+	 * assumed that the CPU is not vulnerable to L1TF.
+	 */
+	if (boot_cpu_data.x86_phys_bits <
+	    52 - shadow_nonpresent_or_rsvd_mask_len)
+		shadow_nonpresent_or_rsvd_mask =
+			rsvd_bits(boot_cpu_data.x86_phys_bits -
+				  shadow_nonpresent_or_rsvd_mask_len,
+				  boot_cpu_data.x86_phys_bits - 1);
 }
 
 static int is_cpuid_PSE36(void)
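A quick check of the arithmetic in the hunk above, with rsvd_bits() re-implemented locally for illustration: the guard only triggers when the CPU reports at most 46 physical address bits (46 < 52 - 5), and for such a CPU the mask covers bits 41..45.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's rsvd_bits(s, e): bits s..e inclusive. */
static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	const int mask_len = 5;         /* shadow_nonpresent_or_rsvd_mask_len */
	int phys_bits = 46;             /* assumed example CPU */
	uint64_t mask = 0;

	if (phys_bits < 52 - mask_len)  /* same guard as the hunk above */
		mask = rsvd_bits(phys_bits - mask_len, phys_bits - 1);

	printf("mask = %#llx\n", (unsigned long long)mask);  /* bits 41..45 */
	return 0;
}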
@@ -4927,7 +4960,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 		       void *insn, int insn_len)
 {
-	int r, emulation_type = EMULTYPE_RETRY;
+	int r, emulation_type = 0;
 	enum emulation_result er;
 	bool direct = vcpu->arch.mmu.direct_map;
@@ -4940,10 +4973,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 	r = RET_PF_INVALID;
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
 		r = handle_mmio_page_fault(vcpu, cr2, direct);
-		if (r == RET_PF_EMULATE) {
-			emulation_type = 0;
+		if (r == RET_PF_EMULATE)
 			goto emulate;
-		}
 	}
 
 	if (r == RET_PF_INVALID) {
@@ -4970,8 +5001,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 		return 1;
 	}
 
-	if (mmio_info_in_cache(vcpu, cr2, direct))
-		emulation_type = 0;
+	/*
+	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
+	 * optimistically try to just unprotect the page and let the processor
+	 * re-execute the instruction that caused the page fault. Do not allow
+	 * retrying MMIO emulation, as it's not only pointless but could also
+	 * cause us to enter an infinite loop because the processor will keep
+	 * faulting on the non-existent MMIO address. Retrying an instruction
+	 * from a nested guest is also pointless and dangerous as we are only
+	 * explicitly shadowing L1's page tables, i.e. unprotecting something
+	 * for L1 isn't going to magically fix whatever issue cause L2 to fail.
+	 */
+	if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
+		emulation_type = EMULTYPE_ALLOW_RETRY;
 emulate:
 	/*
 	 * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
@@ -5500,7 +5542,7 @@ int kvm_mmu_module_init(void)
 {
 	int ret = -ENOMEM;
 
-	kvm_mmu_clear_all_pte_masks();
+	kvm_mmu_reset_all_pte_masks();
 
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					    sizeof(struct pte_list_desc),
arch/x86/kvm/svm.c
@@ -3875,8 +3875,8 @@ static int emulate_on_interception(struct vcpu_svm *svm)
 
 static int rsm_interception(struct vcpu_svm *svm)
 {
-	return x86_emulate_instruction(&svm->vcpu, 0, 0,
-				       rsm_ins_bytes, 2) == EMULATE_DONE;
+	return kvm_emulate_instruction_from_buffer(&svm->vcpu,
+					rsm_ins_bytes, 2) == EMULATE_DONE;
 }
 
 static int rdpmc_interception(struct vcpu_svm *svm)
arch/x86/kvm/vmx.c
@@ -197,12 +197,14 @@ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_
 
 static const struct {
 	const char *option;
-	enum vmx_l1d_flush_state cmd;
+	bool for_parse;
 } vmentry_l1d_param[] = {
-	{"auto",	VMENTER_L1D_FLUSH_AUTO},
-	{"never",	VMENTER_L1D_FLUSH_NEVER},
-	{"cond",	VMENTER_L1D_FLUSH_COND},
-	{"always",	VMENTER_L1D_FLUSH_ALWAYS},
+	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
+	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
+	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
+	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
+	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
+	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
 };
 
 #define L1D_CACHE_ORDER 4
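The reworked table above relies on designated initializers: the array index itself is the enum value, so the old cmd field becomes redundant, and the for_parse flag lets report-only states ("EPT disabled", "not required") be printed without being accepted from the command line. A minimal sketch of that pattern with hypothetical names (not the kernel's):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the vmx_l1d_flush_state enum. */
enum flush_state { FLUSH_AUTO, FLUSH_NEVER, FLUSH_EPT_DISABLED, NR_STATES };

static const struct {
	const char *option;
	bool for_parse;          /* accepted as a parameter value? */
} params[] = {
	[FLUSH_AUTO]         = {"auto",         true},
	[FLUSH_NEVER]        = {"never",        true},
	[FLUSH_EPT_DISABLED] = {"EPT disabled", false},  /* report-only */
};

/* Parse in the same style as the new vmentry_l1d_flush_parse(): the
 * array index itself is the enum value, so no cmd field is needed. */
static int parse(const char *s)
{
	for (int i = 0; i < NR_STATES; i++)
		if (params[i].for_parse && !strcmp(s, params[i].option))
			return i;
	return -1;
}

int main(void)
{
	printf("%d %d %d\n", parse("never"), parse("EPT disabled"), parse("bogus"));
	/* -> 1 -1 -1: report-only strings are rejected like unknown ones. */
	return 0;
}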
@@ -286,8 +288,9 @@ static int vmentry_l1d_flush_parse(const char *s)
 
 	if (s) {
 		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
-			if (sysfs_streq(s, vmentry_l1d_param[i].option))
-				return vmentry_l1d_param[i].cmd;
+			if (vmentry_l1d_param[i].for_parse &&
+			    sysfs_streq(s, vmentry_l1d_param[i].option))
+				return i;
 		}
 	}
 
 	return -EINVAL;
@@ -297,13 +300,13 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
 {
 	int l1tf, ret;
 
-	if (!boot_cpu_has(X86_BUG_L1TF))
-		return 0;
-
 	l1tf = vmentry_l1d_flush_parse(s);
 	if (l1tf < 0)
 		return l1tf;
 
+	if (!boot_cpu_has(X86_BUG_L1TF))
+		return 0;
+
 	/*
 	 * Has vmx_init() run already? If not then this is the pre init
 	 * parameter parsing. In that case just store the value and let
@@ -323,6 +326,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
 
 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
 {
+	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
+		return sprintf(s, "???\n");
+
 	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
 }
@@ -933,17 +939,21 @@ struct vcpu_vmx {
 	/*
 	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
 	 * non-nested (L1) guest, it always points to vmcs01. For a nested
-	 * guest (L2), it points to a different VMCS.
+	 * guest (L2), it points to a different VMCS.  loaded_cpu_state points
+	 * to the VMCS whose state is loaded into the CPU registers that only
+	 * need to be switched when transitioning to/from the kernel; a NULL
+	 * value indicates that host state is loaded.
 	 */
 	struct loaded_vmcs    vmcs01;
 	struct loaded_vmcs   *loaded_vmcs;
+	struct loaded_vmcs   *loaded_cpu_state;
 	bool                  __launched; /* temporary, used in vmx_vcpu_run */
 	struct msr_autoload {
 		struct vmx_msrs guest;
 		struct vmx_msrs host;
 	} msr_autoload;
 	struct {
 		int           loaded;
 		u16           fs_sel, gs_sel, ldt_sel;
 #ifdef CONFIG_X86_64
 		u16           ds_sel, es_sel;
@@ -2744,10 +2754,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 	int i;
 
-	if (vmx->host_state.loaded)
+	if (vmx->loaded_cpu_state)
 		return;
 
-	vmx->host_state.loaded = 1;
+	vmx->loaded_cpu_state = vmx->loaded_vmcs;
+
 	/*
 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
 	 * allow segment selectors with cpl > 0 or ti == 1.
@@ -2809,11 +2820,14 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 {
-	if (!vmx->host_state.loaded)
+	if (!vmx->loaded_cpu_state)
 		return;
 
+	WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
+
 	++vmx->vcpu.stat.host_state_reload;
-	vmx->host_state.loaded = 0;
+	vmx->loaded_cpu_state = NULL;
+
 #ifdef CONFIG_X86_64
 	if (is_long_mode(&vmx->vcpu))
 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
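The three vmx.c hunks above replace the host_state.loaded flag with a pointer that records which VMCS currently has its state live in CPU registers, where NULL means host state is loaded. A toy sketch of that convention (hypothetical types, not KVM's):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Miniature of the pattern: instead of a separate "loaded" flag, a
 * pointer records *which* VMCS currently owns the CPU register state;
 * NULL means host state is loaded. */
struct vmcs_state { const char *name; };

struct vcpu {
	struct vmcs_state *loaded_vmcs;      /* always valid */
	struct vmcs_state *loaded_cpu_state; /* NULL or == loaded_vmcs */
};

static void save_host_state(struct vcpu *v)
{
	if (v->loaded_cpu_state)             /* already saved, nothing to do */
		return;
	v->loaded_cpu_state = v->loaded_vmcs;
	/* ... save segment selectors, MSRs, etc. ... */
}

static void load_host_state(struct vcpu *v)
{
	if (!v->loaded_cpu_state)            /* host state already loaded */
		return;
	assert(v->loaded_cpu_state == v->loaded_vmcs); /* the new WARN_ON_ONCE */
	v->loaded_cpu_state = NULL;
	/* ... restore host segment selectors, MSRs, etc. ... */
}

int main(void)
{
	struct vmcs_state vmcs01 = { "vmcs01" };
	struct vcpu v = { &vmcs01, NULL };

	save_host_state(&v);
	load_host_state(&v);
	printf("host state loaded again: %s\n",
	       v.loaded_cpu_state == NULL ? "yes" : "no");
	return 0;
}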
@@ -7525,8 +7539,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 		if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 			return kvm_skip_emulated_instruction(vcpu);
 		else
-			return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
-						       NULL, 0) == EMULATE_DONE;
+			return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+								EMULATE_DONE;
 	}
 
 	return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
@@ -8109,7 +8123,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 
 	/* CPL=0 must be checked manually. */
 	if (vmx_get_cpl(vcpu)) {
-		kvm_queue_exception(vcpu, UD_VECTOR);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
@@ -8173,7 +8187,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
 {
 	if (vmx_get_cpl(vcpu)) {
-		kvm_queue_exception(vcpu, UD_VECTOR);
+		kvm_inject_gp(vcpu, 0);
 		return 0;
 	}
@@ -10511,8 +10525,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 		return;
 
 	cpu = get_cpu();
-	vmx->loaded_vmcs = vmcs;
 	vmx_vcpu_put(vcpu);
+	vmx->loaded_vmcs = vmcs;
 	vmx_vcpu_load(vcpu, cpu);
 	put_cpu();
 }
arch/x86/kvm/x86.c
@@ -5810,7 +5810,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 	gpa_t gpa = cr2;
 	kvm_pfn_t pfn;
 
-	if (emulation_type & EMULTYPE_NO_REEXECUTE)
+	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+		return false;
+
+	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
 		return false;
 
 	if (!vcpu->arch.mmu.direct_map) {
@@ -5898,7 +5901,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	 */
 	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
 
-	if (!(emulation_type & EMULTYPE_RETRY))
+	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+		return false;
+
+	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
 		return false;
 
 	if (x86_page_table_writing_insn(ctxt))
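Both x86.c hunks above flip the retry check from an opt-out flag to an opt-in one and warn if a nested guest somehow requests a retry. A rough sketch of the new gating with a made-up flag value (the real EMULTYPE_* definitions live in KVM's x86 headers):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag value for illustration only. */
#define EMULTYPE_ALLOW_RETRY (1 << 0)

static bool in_guest_mode;   /* stand-in for is_guest_mode(vcpu) */

/* After the change, retry is opt-in: callers must pass ALLOW_RETRY, and
 * a nested-guest caller trips a warning instead of retrying silently. */
static bool may_retry(int emulation_type)
{
	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
		return false;
	if (in_guest_mode) {
		fprintf(stderr, "WARN: retry requested while in guest mode\n");
		return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", may_retry(0));                    /* 0: not requested */
	printf("%d\n", may_retry(EMULTYPE_ALLOW_RETRY)); /* 1: allowed */
	in_guest_mode = true;
	printf("%d\n", may_retry(EMULTYPE_ALLOW_RETRY)); /* 0: nested guest */
	return 0;
}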
@@ -6506,20 +6512,22 @@ static void kvm_set_mmio_spte_mask(void)
 	 * Set the reserved bits and the present bit of an paging-structure
 	 * entry to generate page fault with PFER.RSV = 1.
 	 */
-	/* Mask the reserved physical address bits. */
-	mask = rsvd_bits(maxphyaddr, 51);
+
+	/*
+	 * Mask the uppermost physical address bit, which would be reserved as
+	 * long as the supported physical address width is less than 52.
+	 */
+	mask = 1ull << 51;
 
 	/* Set the present bit. */
 	mask |= 1ull;
 
-#ifdef CONFIG_X86_64
 	/*
 	 * If reserved bit is not supported, clear the present bit to disable
 	 * mmio page fault.
 	 */
-	if (maxphyaddr == 52)
+	if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
 		mask &= ~1ull;
-#endif
 
 	kvm_mmu_set_mmio_spte_mask(mask, mask);
 }
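A stand-alone rendering of the new kvm_set_mmio_spte_mask() computation above, with maxphyaddr as an assumed input instead of a CPUID read: bit 51 plus the present bit while the address width is below 52, and only bit 51 (no present bit, so no MMIO caching) when it is exactly 52.

#include <stdint.h>
#include <stdio.h>

static uint64_t mmio_spte_mask(int maxphyaddr, int is_64bit)
{
	uint64_t mask;

	/* Bit 51: reserved whenever the address width is below 52. */
	mask = 1ull << 51;
	/* Present bit. */
	mask |= 1ull;

	/* With a full 52-bit width, bit 51 is a real address bit, so drop
	 * the present bit and give up on MMIO caching instead. */
	if (is_64bit && maxphyaddr == 52)
		mask &= ~1ull;

	return mask;
}

int main(void)
{
	printf("maxphyaddr=46: %#llx\n",
	       (unsigned long long)mmio_spte_mask(46, 1)); /* 0x8000000000001 */
	printf("maxphyaddr=52: %#llx\n",
	       (unsigned long long)mmio_spte_mask(52, 1)); /* 0x8000000000000 */
	return 0;
}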
arch/x86/mm/fault.c
@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 		return -1;
 
-	WARN_ON_ONCE(in_nmi());
-
 	/*
 	 * Synchronize this task's top level page-table
 	 * with the 'reference' page table.
arch/x86/mm/pti.c
@@ -177,7 +177,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
 
 	if (pgd_none(*pgd)) {
 		unsigned long new_p4d_page = __get_free_page(gfp);
-		if (!new_p4d_page)
+		if (WARN_ON_ONCE(!new_p4d_page))
 			return NULL;
 
 		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
@@ -196,13 +196,17 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
 static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 {
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
-	p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
+	p4d_t *p4d;
 	pud_t *pud;
 
+	p4d = pti_user_pagetable_walk_p4d(address);
+	if (!p4d)
+		return NULL;
+
 	BUILD_BUG_ON(p4d_large(*p4d) != 0);
 	if (p4d_none(*p4d)) {
 		unsigned long new_pud_page = __get_free_page(gfp);
-		if (!new_pud_page)
+		if (WARN_ON_ONCE(!new_pud_page))
 			return NULL;
 
 		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
@@ -216,7 +220,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 	}
 	if (pud_none(*pud)) {
 		unsigned long new_pmd_page = __get_free_page(gfp);
-		if (!new_pmd_page)
+		if (WARN_ON_ONCE(!new_pmd_page))
 			return NULL;
 
 		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
@@ -238,9 +242,13 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 {
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
-	pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+	pmd_t *pmd;
 	pte_t *pte;
 
+	pmd = pti_user_pagetable_walk_pmd(address);
+	if (!pmd)
+		return NULL;
+
 	/* We can't do anything sensible if we hit a large mapping. */
 	if (pmd_large(*pmd)) {
 		WARN_ON(1);
@@ -298,6 +306,10 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
 		p4d_t *p4d;
 		pud_t *pud;
 
+		/* Overflow check */
+		if (addr < start)
+			break;
+
 		pgd = pgd_offset_k(addr);
 		if (WARN_ON(pgd_none(*pgd)))
 			return;
@@ -355,6 +367,9 @@ static void __init pti_clone_p4d(unsigned long addr)
 	pgd_t *kernel_pgd;
 
 	user_p4d = pti_user_pagetable_walk_p4d(addr);
+	if (!user_p4d)
+		return;
+
 	kernel_pgd = pgd_offset_k(addr);
 	kernel_p4d = p4d_offset(kernel_pgd, addr);
 	*user_p4d = *kernel_p4d;
arch/x86/xen/mmu_pv.c
@@ -434,14 +434,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	trace_xen_mmu_set_pte_atomic(ptep, pte);
-	set_64bit((u64 *)ptep, native_pte_val(pte));
+	__xen_set_pte(ptep, pte);
 }
 
 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	trace_xen_mmu_pte_clear(mm, addr, ptep);
-	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
-		native_pte_clear(mm, addr, ptep);
+	__xen_set_pte(ptep, native_make_pte(0));
 }
 
 static void xen_pmd_clear(pmd_t *pmdp)
@@ -1571,7 +1570,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
 			       pte_val_ma(pte));
 #endif
-	native_set_pte(ptep, pte);
+	__xen_set_pte(ptep, pte);
 }
 
 /* Early in boot, while setting up the initial pagetable, assume
arch/xtensa/include/asm/cacheasm.h
@@ -31,16 +31,32 @@
  *
  */
 
-	.macro	__loop_cache_all ar at insn size line_width
-
-	movi	\ar, 0
-
-	__loopi	\ar, \at, \size, (4 << (\line_width))
-	\insn	\ar, 0 << (\line_width)
-	\insn	\ar, 1 << (\line_width)
-	\insn	\ar, 2 << (\line_width)
-	\insn	\ar, 3 << (\line_width)
-	__endla	\ar, \at, 4 << (\line_width)
+	.macro	__loop_cache_unroll ar at insn size line_width max_immed
+
+	.if	(1 << (\line_width)) > (\max_immed)
+	.set	_reps, 1
+	.elseif	(2 << (\line_width)) > (\max_immed)
+	.set	_reps, 2
+	.else
+	.set	_reps, 4
+	.endif
+
+	__loopi	\ar, \at, \size, (_reps << (\line_width))
+	.set	_index, 0
+	.rep	_reps
+	\insn	\ar, _index << (\line_width)
+	.set	_index, _index + 1
+	.endr
+	__endla	\ar, \at, _reps << (\line_width)
+
+	.endm
+
+	.macro	__loop_cache_all ar at insn size line_width max_immed
+
+	movi	\ar, 0
+	__loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
 
 	.endm
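The .if/.elseif chain in the new __loop_cache_unroll macro above picks how many cache-op instructions to emit per loop iteration so that the largest immediate offset, (reps - 1) << line_width, still fits the instruction's immediate field; 240 and 1020 are the two limits passed at the call sites updated later in this file. A C rendering of just that selection, for illustration:

#include <stdio.h>

/* C rendering of the assembler-time .if/.elseif chain above. */
static int pick_reps(int line_width, int max_immed)
{
	if ((1 << line_width) > max_immed)
		return 1;
	if ((2 << line_width) > max_immed)
		return 2;
	return 4;
}

int main(void)
{
	printf("line_width=5, max=240  -> reps=%d\n", pick_reps(5, 240));
	printf("line_width=7, max=240  -> reps=%d\n", pick_reps(7, 240));
	printf("line_width=8, max=240  -> reps=%d\n", pick_reps(8, 240));
	printf("line_width=8, max=1020 -> reps=%d\n", pick_reps(8, 1020));
	return 0;
}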
@@ -57,14 +73,9 @@
 
 	.endm
 
-	.macro	__loop_cache_page ar at insn line_width
+	.macro	__loop_cache_page ar at insn line_width max_immed
 
-	__loopi	\ar, \at, PAGE_SIZE, 4 << (\line_width)
-	\insn	\ar, 0 << (\line_width)
-	\insn	\ar, 1 << (\line_width)
-	\insn	\ar, 2 << (\line_width)
-	\insn	\ar, 3 << (\line_width)
-	__endla	\ar, \at, 4 << (\line_width)
+	__loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
 
 	.endm
@@ -72,7 +83,8 @@
 	.macro	___unlock_dcache_all ar at
 
 #if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
-	__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
+		XCHAL_DCACHE_LINEWIDTH 240
 #endif
 
 	.endm
@@ -81,7 +93,8 @@
 	.macro	___unlock_icache_all ar at
 
 #if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
-	__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
+	__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
+		XCHAL_ICACHE_LINEWIDTH 240
 #endif
 
 	.endm
@@ -90,7 +103,8 @@
 	.macro	___flush_invalidate_dcache_all ar at
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
+		XCHAL_DCACHE_LINEWIDTH 240
 #endif
 
 	.endm
@@ -99,7 +113,8 @@
 	.macro	___flush_dcache_all ar at
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
+		XCHAL_DCACHE_LINEWIDTH 240
 #endif
 
 	.endm
@@ -108,8 +123,8 @@
 	.macro	___invalidate_dcache_all ar at
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
-			 XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
+			 XCHAL_DCACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -118,8 +133,8 @@
 	.macro	___invalidate_icache_all ar at
 
 #if XCHAL_ICACHE_SIZE
-	__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
-			 XCHAL_ICACHE_LINEWIDTH
+	__loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
+			 XCHAL_ICACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -166,7 +181,7 @@
 	.macro	___flush_invalidate_dcache_page ar as
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -175,7 +190,7 @@
 	.macro	___flush_dcache_page ar as
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -184,7 +199,7 @@
 	.macro	___invalidate_dcache_page ar as
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -193,7 +208,7 @@
 	.macro	___invalidate_icache_page ar as
 
 #if XCHAL_ICACHE_SIZE
-	__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
+	__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
 #endif
 
 	.endm
arch/xtensa/platforms/iss/setup.c
@@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = {
 
 void __init platform_setup(char **p_cmdline)
 {
+	static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
+	static char cmdline[COMMAND_LINE_SIZE] __initdata;
 	int argc = simc_argc();
 	int argv_size = simc_argv_size();