Commit 086e5551 authored by Lorenzo "Palinuro" Faletra

Import Upstream version 5.4.8

parent d2ab784c
......@@ -1117,7 +1117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r7, VCPU_GPR(R7)(r4)
bne ret_to_ultra
lwz r0, VCPU_CR(r4)
ld r0, VCPU_CR(r4)
mtcr r0
ld r0, VCPU_GPR(R0)(r4)
......@@ -1137,7 +1137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
* R3 = UV_RETURN
*/
ret_to_ultra:
lwz r0, VCPU_CR(r4)
ld r0, VCPU_CR(r4)
mtcr r0
ld r0, VCPU_GPR(R3)(r4)
......
......@@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
_GLOBAL(__clear_user)
_GLOBAL(__arch_clear_user)
/*
* Use dcbz on the complete cache lines in the destination
* to set them to zero. This requires that the destination
......@@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
EX_TABLE(8b, 91b)
EX_TABLE(9b, 91b)
EXPORT_SYMBOL(__clear_user)
EXPORT_SYMBOL(__arch_clear_user)
......@@ -17,7 +17,7 @@ PPC64_CACHES:
.section ".text"
/**
* __clear_user: - Zero a block of memory in user space, with less checking.
* __arch_clear_user: - Zero a block of memory in user space, with less checking.
* @to: Destination address, in user space.
* @n: Number of bytes to zero.
*
......@@ -58,7 +58,7 @@ err3; stb r0,0(r3)
mr r3,r4
blr
_GLOBAL_TOC(__clear_user)
_GLOBAL_TOC(__arch_clear_user)
cmpdi r4,32
neg r6,r3
li r0,0
......@@ -181,4 +181,4 @@ err1; dcbz 0,r3
cmpdi r4,32
blt .Lshort_clear
b .Lmedium_clear
EXPORT_SYMBOL(__clear_user)
EXPORT_SYMBOL(__arch_clear_user)
......@@ -294,10 +294,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, psize,
ssize);
if (ret == -1) {
/* Try to remove a non bolted entry */
ret = mmu_hash_ops.hpte_remove(hpteg);
if (ret != -1)
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, psize,
ssize);
}
if (ret < 0)
break;
cond_resched();
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled() &&
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
......
......@@ -411,6 +411,10 @@ static struct bus_type cmm_subsys = {
.dev_name = "cmm",
};
static void cmm_release_device(struct device *dev)
{
}
/**
* cmm_sysfs_register - Register with sysfs
*
......@@ -426,6 +430,7 @@ static int cmm_sysfs_register(struct device *dev)
dev->id = 0;
dev->bus = &cmm_subsys;
dev->release = cmm_release_device;
if ((rc = device_register(dev)))
goto subsys_unregister;
......
......@@ -152,7 +152,7 @@ static int papr_scm_meta_get(struct papr_scm_priv *p,
int len, read;
int64_t ret;
if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
return -EINVAL;
for (len = hdr->in_length; len; len -= read) {
......@@ -206,7 +206,7 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
__be64 data_be;
int64_t ret;
if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
return -EINVAL;
for (len = hdr->in_length; len; len -= wrote) {
......
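Editorial note, not part of the upstream diff: the two papr_scm hunks above relax an off-by-one bounds check, so a metadata read or write that ends exactly at the last byte of the metadata area is accepted, and only a request that runs past it is rejected. A minimal standalone sketch of the corrected test, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical helper for illustration: the request is valid when it stays
 * within, or ends exactly at, the metadata area; reject it only when it
 * runs past the end (the '>' comparison from the hunks above). */
static bool metadata_range_ok(size_t offset, size_t length, size_t metadata_size)
{
        return !(offset + length > metadata_size);
}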
......@@ -20,7 +20,7 @@ objdump="$1"
vmlinux="$2"
bad_relocs=$(
"$objdump" -R "$vmlinux" |
$objdump -R "$vmlinux" |
# Only look at relocation lines.
grep -E '\<R_' |
# These relocations are okay
......
......@@ -18,14 +18,14 @@ vmlinux="$2"
#__end_interrupts should be located within the first 64K
end_intr=0x$(
"$objdump" -R "$vmlinux" -d --start-address=0xc000000000000000 \
$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \
--stop-address=0xc000000000010000 |
grep '\<__end_interrupts>:' |
awk '{print $1}'
)
BRANCHES=$(
"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \
$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \
--stop-address=${end_intr} |
grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
grep -v '\<__start_initialization_multiplatform>' |
......
......@@ -74,14 +74,17 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
u64 bits;
unsigned int n, mbl_offset;
unsigned int n;
int mbl_offset;
n = ctx->count % bsize;
bits = ctx->count * 8;
mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32);
mbl_offset = s390_crypto_shash_parmsize(ctx->func);
if (mbl_offset < 0)
return -EINVAL;
mbl_offset = mbl_offset / sizeof(u32);
/* set total msg bit length (mbl) in CPACF parmblock */
switch (ctx->func) {
case CPACF_KLMD_SHA_1:
......
......@@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
crst_table_init(table, _REGION2_ENTRY_EMPTY);
return (p4d_t *) table;
}
#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
if (!mm_p4d_folded(mm))
crst_table_free(mm, (unsigned long *) p4d);
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
......@@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
crst_table_init(table, _REGION3_ENTRY_EMPTY);
return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
if (!mm_pud_folded(mm))
crst_table_free(mm, (unsigned long *) pud);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
......@@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
if (mm_pmd_folded(mm))
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
crst_table_free(mm, (unsigned long *) pmd);
}
......
......@@ -10,8 +10,9 @@
#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H
#include <asm/lowcore.h>
#include <linux/preempt.h>
#include <linux/time64.h>
#include <asm/lowcore.h>
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
......@@ -186,15 +187,18 @@ extern unsigned char tod_clock_base[16] __aligned(8);
/**
* get_clock_monotonic - returns current time in clock rate units
*
* The caller must ensure that preemption is disabled.
* The clock and tod_clock_base get changed via stop_machine.
* Therefore preemption must be disabled when calling this
* function, otherwise the returned value is not guaranteed to
* be monotonic.
* Therefore preemption must be disabled, otherwise the returned
* value is not guaranteed to be monotonic.
*/
static inline unsigned long long get_tod_clock_monotonic(void)
{
return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
unsigned long long tod;
preempt_disable_notrace();
tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
preempt_enable_notrace();
return tod;
}
/**
......
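Editorial sketch, not from the diff: with the reworked helper above, a caller no longer has to bracket the call with preempt_disable()/preempt_enable() itself, because each read now samples the TOD clock and tod_clock_base consistently. A hypothetical caller might look like:

#include <asm/timex.h>

static unsigned long long tod_elapsed_example(void)
{
        unsigned long long t0, t1;

        t0 = get_tod_clock_monotonic();  /* preemption handled inside the helper */
        /* ... work that may be preempted ... */
        t1 = get_tod_clock_monotonic();
        return t1 - t0;
}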
......@@ -461,10 +461,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr += sprintf(ptr, "%%c%i", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
else if (operand->flags & OPERAND_PCREL)
ptr += sprintf(ptr, "%lx", (signed int) value
+ addr);
else if (operand->flags & OPERAND_SIGNED)
else if (operand->flags & OPERAND_PCREL) {
void *pcrel = (void *)((int)value + addr);
ptr += sprintf(ptr, "%px", pcrel);
} else if (operand->flags & OPERAND_SIGNED)
ptr += sprintf(ptr, "%i", value);
else
ptr += sprintf(ptr, "%u", value);
......@@ -536,7 +537,7 @@ void show_code(struct pt_regs *regs)
else
*ptr++ = ' ';
addr = regs->psw.addr + start - 32;
ptr += sprintf(ptr, "%016lx: ", addr);
ptr += sprintf(ptr, "%px: ", (void *)addr);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
......@@ -564,7 +565,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
opsize = insn_length(*code);
if (opsize > len)
break;
ptr += sprintf(ptr, "%p: ", code);
ptr += sprintf(ptr, "%px: ", code);
for (i = 0; i < opsize; i++)
ptr += sprintf(ptr, "%02x", code[i]);
*ptr++ = '\t';
......
......@@ -164,7 +164,9 @@ static bool kdump_csum_valid(struct kimage *image)
#ifdef CONFIG_CRASH_DUMP
int rc;
preempt_disable();
rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
preempt_enable();
return rc == 0;
#else
return false;
......
......@@ -199,7 +199,7 @@ static const int cpumf_generic_events_user[] = {
[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
static int __hw_perf_event_init(struct perf_event *event)
static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
......@@ -207,7 +207,7 @@ static int __hw_perf_event_init(struct perf_event *event)
int err = 0;
u64 ev;
switch (attr->type) {
switch (type) {
case PERF_TYPE_RAW:
/* Raw events are used to access counters directly,
* hence do not permit excludes */
......@@ -294,17 +294,16 @@ static int __hw_perf_event_init(struct perf_event *event)
static int cpumf_pmu_event_init(struct perf_event *event)
{
unsigned int type = event->attr.type;
int err;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
case PERF_TYPE_RAW:
err = __hw_perf_event_init(event);
break;
default:
if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
err = __hw_perf_event_init(event, type);
else if (event->pmu->type == type)
/* Registered as unknown PMU */
err = __hw_perf_event_init(event, PERF_TYPE_RAW);
else
return -ENOENT;
}
if (unlikely(err) && event->destroy)
event->destroy(event);
......@@ -553,7 +552,7 @@ static int __init cpumf_pmu_init(void)
return -ENODEV;
cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
if (rc)
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
return rc;
......
......@@ -243,13 +243,13 @@ static int cf_diag_event_init(struct perf_event *event)
int err = -ENOENT;
debug_sprintf_event(cf_diag_dbg, 5,
"%s event %p cpu %d config %#llx "
"%s event %p cpu %d config %#llx type:%u "
"sample_type %#llx cf_diag_events %d\n", __func__,
event, event->cpu, attr->config, attr->sample_type,
atomic_read(&cf_diag_events));
event, event->cpu, attr->config, event->pmu->type,
attr->sample_type, atomic_read(&cf_diag_events));
if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
event->attr.type != PERF_TYPE_RAW)
event->attr.type != event->pmu->type)
goto out;
/* Raw events are used to access counters directly,
......@@ -693,7 +693,7 @@ static int __init cf_diag_init(void)
}
debug_register_view(cf_diag_dbg, &debug_sprintf_view);
rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW);
rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
if (rc) {
debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
debug_unregister(cf_diag_dbg);
......
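Editorial note, not part of the upstream diff: the two perf hunks above move the s390 counter PMUs from the fixed PERF_TYPE_RAW id to a dynamically assigned type (perf_pmu_register() called with type -1), which is why the event_init paths now compare against event->pmu->type. A minimal sketch of that registration pattern, with hypothetical names and the other PMU callbacks omitted:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/perf_event.h>

static int example_event_init(struct perf_event *event)
{
        /* With a dynamically assigned type, compare against the value the
         * perf core gave this PMU, not a fixed PERF_TYPE_* constant. */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;
        return 0;
}

static struct pmu example_pmu = {
        .event_init = example_event_init,
        /* .add/.del/.start/.stop/.read omitted in this sketch */
};

static int __init example_pmu_register(void)
{
        /* type -1: ask the perf core to allocate a free dynamic type id. */
        return perf_pmu_register(&example_pmu, "example_pmu", -1);
}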
......@@ -193,7 +193,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
unsigned long num_sdb, gfp_t gfp_flags)
{
int i, rc;
unsigned long *new, *tail;
unsigned long *new, *tail, *tail_prev = NULL;
if (!sfb->sdbt || !sfb->tail)
return -EINVAL;
......@@ -232,6 +232,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
sfb->num_sdbt++;
/* Link current page to tail of chain */
*tail = (unsigned long)(void *) new + 1;
tail_prev = tail;
tail = new;
}
......@@ -241,10 +242,22 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* issue, a new realloc call (if required) might succeed.
*/
rc = alloc_sample_data_block(tail, gfp_flags);
if (rc)
if (rc) {
/* Undo last SDBT. An SDBT with no SDB at its first
* entry but with an SDBT entry instead can not be
* handled by the interrupt handler code.
* Avoid this situation.
*/
if (tail_prev) {
sfb->num_sdbt--;
free_page((unsigned long) new);
tail = tail_prev;
}
break;
}
sfb->num_sdb++;
tail++;
tail_prev = new = NULL; /* Allocated at least one SBD */
}
/* Link sampling buffer to its origin */
......
......@@ -224,9 +224,13 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct unwind_state state;
unsigned long addr;
unwind_for_each_frame(&state, current, regs, 0)
perf_callchain_store(entry, state.ip);
unwind_for_each_frame(&state, current, regs, 0) {
addr = unwind_get_return_address(&state);
if (!addr || perf_callchain_store(entry, addr))
return;
}
}
/* Perf definitions for PMU event attributes in sysfs */
......
......@@ -60,6 +60,11 @@ bool unwind_next_frame(struct unwind_state *state)
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
reliable = false;
regs = NULL;
if (!__kernel_text_address(ip)) {
/* skip bogus %r14 */
state->regs = NULL;
return unwind_next_frame(state);
}
} else {
sf = (struct stack_frame *) state->sp;
sp = READ_ONCE_NOCHECK(sf->back_chain);
......
......@@ -70,7 +70,7 @@ void notrace s390_kernel_write(void *dst, const void *src, size_t size)
spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
}
static int __memcpy_real(void *dest, void *src, size_t count)
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
register unsigned long _dest asm("2") = (unsigned long) dest;
register unsigned long _len1 asm("3") = (unsigned long) count;
......@@ -91,19 +91,23 @@ static int __memcpy_real(void *dest, void *src, size_t count)
return rc;
}
static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
unsigned long count)
static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
unsigned long src,
unsigned long count)
{
int irqs_disabled, rc;
unsigned long flags;
if (!count)
return 0;
flags = __arch_local_irq_stnsm(0xf8UL);
flags = arch_local_irq_save();
irqs_disabled = arch_irqs_disabled_flags(flags);
if (!irqs_disabled)
trace_hardirqs_off();
__arch_local_irq_stnsm(0xf8); // disable DAT
rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
if (flags & PSW_MASK_DAT)
__arch_local_irq_stosm(0x04); // enable DAT
if (!irqs_disabled)
trace_hardirqs_on();
__arch_local_irq_ssm(flags);
......@@ -115,9 +119,15 @@ static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
*/
int memcpy_real(void *dest, void *src, size_t count)
{
if (S390_lowcore.nodat_stack != 0)
return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
3, dest, src, count);
int rc;
if (S390_lowcore.nodat_stack != 0) {
preempt_disable();
rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
dest, src, count);
preempt_enable();
return rc;
}
/*
* This is a really early memcpy_real call, the stacks are
* not set up yet. Just call _memcpy_real on the early boot
......
......@@ -23,6 +23,7 @@
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/dis.h>
#include <asm/facility.h>
......@@ -1369,7 +1370,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
}
memset(&jit, 0, sizeof(jit));
jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
if (jit.addrs == NULL) {
fp = orig_fp;
goto out;
......@@ -1422,7 +1423,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
if (!fp->is_func || extra_pass) {
bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
free_addrs:
kfree(jit.addrs);
kvfree(jit.addrs);
kfree(jit_data);
fp->aux->jit_data = NULL;
}
......
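Editorial note, not part of the upstream diff: the hunk above switches the BPF JIT address array from kcalloc() to kvcalloc(), which can fall back to vmalloc for very large programs, so the buffer must be released with kvfree() rather than kfree(). A minimal sketch of that pairing, with hypothetical names:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/types.h>

static int example_alloc_addrs(unsigned int nr_insns)
{
        u32 *addrs;

        /* kvcalloc memory may come from slab or vmalloc; always pair it with kvfree. */
        addrs = kvcalloc(nr_insns + 1, sizeof(*addrs), GFP_KERNEL);
        if (!addrs)
                return -ENOMEM;
        /* ... record per-instruction offsets ... */
        kvfree(addrs);
        return 0;
}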