Commit 591effbd authored by Lorenzo "Palinuro" Faletra

Import Upstream version 4.17.17

parent 156ae521
@@ -11,6 +11,7 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 #include <linux/bootmem.h>
+#include <linux/syscalls.h>
 #include <linux/irq.h>
 #include <linux/list.h>
 #include <linux/of.h>
......
@@ -334,6 +334,7 @@ static void __init prom_print_dec(unsigned long val)
 call_prom("write", 3, 1, prom.stdout, buf+i, size);
 }
+__printf(1, 2)
 static void __init prom_printf(const char *format, ...)
 {
 const char *p, *q, *s;
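
The `__printf(1, 2)` annotation is what drives most of the format-string changes in the hunks below: once prom_printf() carries the printf attribute, the compiler type-checks every call site, and all the pre-existing `%x`-versus-`unsigned long` mismatches surface as -Wformat warnings. A minimal standalone sketch of the mechanism (not kernel code; the kernel defines __printf the same way in its compiler headers):

#include <stdio.h>
#include <stdarg.h>

/* Assumed to match the kernel's definition in spirit. */
#define __printf(a, b) __attribute__((format(printf, a, b)))

__printf(1, 2)
static void demo_printf(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	unsigned long addr = 0x1000;
	/* demo_printf("addr = 0x%x\n", addr); would now warn:
	 * format '%x' expects 'unsigned int', argument is 'long unsigned int' */
	demo_printf("addr = 0x%lx\n", addr);  /* correct specifier, no warning */
	return 0;
}
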
@@ -1160,7 +1161,7 @@ static void __init prom_send_capabilities(void)
 */
 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
-prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
+prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
 cores, NR_CPUS);
 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
@@ -1242,7 +1243,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
 if (align)
 base = _ALIGN_UP(base, align);
-prom_debug("alloc_up(%x, %x)\n", size, align);
+prom_debug("%s(%lx, %lx)\n", __func__, size, align);
 if (ram_top == 0)
 prom_panic("alloc_up() called with mem not initialized\n");
@@ -1253,7 +1254,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
 for(; (base + size) <= alloc_top;
 base = _ALIGN_UP(base + 0x100000, align)) {
-prom_debug(" trying: 0x%x\n\r", base);
+prom_debug(" trying: 0x%lx\n\r", base);
 addr = (unsigned long)prom_claim(base, size, 0);
 if (addr != PROM_ERROR && addr != 0)
 break;
@@ -1265,12 +1266,12 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
 return 0;
 alloc_bottom = addr + size;
-prom_debug(" -> %x\n", addr);
-prom_debug(" alloc_bottom : %x\n", alloc_bottom);
-prom_debug(" alloc_top : %x\n", alloc_top);
-prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
-prom_debug(" rmo_top : %x\n", rmo_top);
-prom_debug(" ram_top : %x\n", ram_top);
+prom_debug(" -> %lx\n", addr);
+prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
+prom_debug(" alloc_top : %lx\n", alloc_top);
+prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
+prom_debug(" rmo_top : %lx\n", rmo_top);
+prom_debug(" ram_top : %lx\n", ram_top);
 return addr;
 }
@@ -1285,7 +1286,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
 {
 unsigned long base, addr = 0;
-prom_debug("alloc_down(%x, %x, %s)\n", size, align,
+prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
 highmem ? "(high)" : "(low)");
 if (ram_top == 0)
 prom_panic("alloc_down() called with mem not initialized\n");
@@ -1313,7 +1314,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
 base = _ALIGN_DOWN(alloc_top - size, align);
 for (; base > alloc_bottom;
 base = _ALIGN_DOWN(base - 0x100000, align)) {
-prom_debug(" trying: 0x%x\n\r", base);
+prom_debug(" trying: 0x%lx\n\r", base);
 addr = (unsigned long)prom_claim(base, size, 0);
 if (addr != PROM_ERROR && addr != 0)
 break;
@@ -1324,12 +1325,12 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
 alloc_top = addr;
 bail:
-prom_debug(" -> %x\n", addr);
-prom_debug(" alloc_bottom : %x\n", alloc_bottom);
-prom_debug(" alloc_top : %x\n", alloc_top);
-prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
-prom_debug(" rmo_top : %x\n", rmo_top);
-prom_debug(" ram_top : %x\n", ram_top);
+prom_debug(" -> %lx\n", addr);
+prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
+prom_debug(" alloc_top : %lx\n", alloc_top);
+prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
+prom_debug(" rmo_top : %lx\n", rmo_top);
+prom_debug(" ram_top : %lx\n", ram_top);
 return addr;
 }
@@ -1455,7 +1456,7 @@ static void __init prom_init_mem(void)
 if (size == 0)
 continue;
-prom_debug(" %x %x\n", base, size);
+prom_debug(" %lx %lx\n", base, size);
 if (base == 0 && (of_platform & PLATFORM_LPAR))
 rmo_top = size;
 if ((base + size) > ram_top)
@@ -1475,12 +1476,12 @@ static void __init prom_init_mem(void)
 if (prom_memory_limit) {
 if (prom_memory_limit <= alloc_bottom) {
-prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
-prom_memory_limit);
+prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
+prom_memory_limit);
 prom_memory_limit = 0;
 } else if (prom_memory_limit >= ram_top) {
-prom_printf("Ignoring mem=%x >= ram_top.\n",
-prom_memory_limit);
+prom_printf("Ignoring mem=%lx >= ram_top.\n",
+prom_memory_limit);
 prom_memory_limit = 0;
 } else {
 ram_top = prom_memory_limit;
@@ -1512,12 +1513,13 @@ static void __init prom_init_mem(void)
 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
 prom_printf("memory layout at init:\n");
-prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
-prom_printf(" alloc_bottom : %x\n", alloc_bottom);
-prom_printf(" alloc_top : %x\n", alloc_top);
-prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
-prom_printf(" rmo_top : %x\n", rmo_top);
-prom_printf(" ram_top : %x\n", ram_top);
+prom_printf(" memory_limit : %lx (16 MB aligned)\n",
+prom_memory_limit);
+prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
+prom_printf(" alloc_top : %lx\n", alloc_top);
+prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
+prom_printf(" rmo_top : %lx\n", rmo_top);
+prom_printf(" ram_top : %lx\n", ram_top);
 }
 static void __init prom_close_stdin(void)
@@ -1578,7 +1580,7 @@ static void __init prom_instantiate_opal(void)
 return;
 }
-prom_printf("instantiating opal at 0x%x...", base);
+prom_printf("instantiating opal at 0x%llx...", base);
 if (call_prom_ret("call-method", 4, 3, rets,
 ADDR("load-opal-runtime"),
@@ -1594,10 +1596,10 @@ static void __init prom_instantiate_opal(void)
 reserve_mem(base, size);
-prom_debug("opal base = 0x%x\n", base);
-prom_debug("opal align = 0x%x\n", align);
-prom_debug("opal entry = 0x%x\n", entry);
-prom_debug("opal size = 0x%x\n", (long)size);
+prom_debug("opal base = 0x%llx\n", base);
+prom_debug("opal align = 0x%llx\n", align);
+prom_debug("opal entry = 0x%llx\n", entry);
+prom_debug("opal size = 0x%llx\n", size);
 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
 &base, sizeof(base));
@@ -1674,7 +1676,7 @@ static void __init prom_instantiate_rtas(void)
 prom_debug("rtas base = 0x%x\n", base);
 prom_debug("rtas entry = 0x%x\n", entry);
-prom_debug("rtas size = 0x%x\n", (long)size);
+prom_debug("rtas size = 0x%x\n", size);
 prom_debug("prom_instantiate_rtas: end...\n");
 }
@@ -1732,7 +1734,7 @@ static void __init prom_instantiate_sml(void)
 if (base == 0)
 prom_panic("Could not allocate memory for sml\n");
-prom_printf("instantiating sml at 0x%x...", base);
+prom_printf("instantiating sml at 0x%llx...", base);
 memset((void *)base, 0, size);
@@ -1751,8 +1753,8 @@ static void __init prom_instantiate_sml(void)
 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
 &size, sizeof(size));
-prom_debug("sml base = 0x%x\n", base);
-prom_debug("sml size = 0x%x\n", (long)size);
+prom_debug("sml base = 0x%llx\n", base);
+prom_debug("sml size = 0x%x\n", size);
 prom_debug("prom_instantiate_sml: end...\n");
 }
@@ -1845,7 +1847,7 @@ static void __init prom_initialize_tce_table(void)
 prom_debug("TCE table: %s\n", path);
 prom_debug("\tnode = 0x%x\n", node);
-prom_debug("\tbase = 0x%x\n", base);
+prom_debug("\tbase = 0x%llx\n", base);
 prom_debug("\tsize = 0x%x\n", minsize);
 /* Initialize the table to have a one-to-one mapping
@@ -1932,12 +1934,12 @@ static void __init prom_hold_cpus(void)
 }
 prom_debug("prom_hold_cpus: start...\n");
-prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
-prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
-prom_debug(" 1) acknowledge = 0x%x\n",
+prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
+prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
+prom_debug(" 1) acknowledge = 0x%lx\n",
 (unsigned long)acknowledge);
-prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
-prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
+prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
+prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
 /* Set the common spinloop variable, so all of the secondary cpus
 * will block when they are awakened from their OF spinloop.
@@ -1965,7 +1967,7 @@ static void __init prom_hold_cpus(void)
 prom_getprop(node, "reg", &reg, sizeof(reg));
 cpu_no = be32_to_cpu(reg);
-prom_debug("cpu hw idx = %lu\n", cpu_no);
+prom_debug("cpu hw idx = %u\n", cpu_no);
 /* Init the acknowledge var which will be reset by
 * the secondary cpu when it awakens from its OF
@@ -1975,7 +1977,7 @@ static void __init prom_hold_cpus(void)
 if (cpu_no != prom.cpu) {
 /* Primary Thread of non-boot cpu or any thread */
-prom_printf("starting cpu hw idx %lu... ", cpu_no);
+prom_printf("starting cpu hw idx %u... ", cpu_no);
 call_prom("start-cpu", 3, 0, node,
 secondary_hold, cpu_no);
@@ -1986,11 +1988,11 @@ static void __init prom_hold_cpus(void)
 if (*acknowledge == cpu_no)
 prom_printf("done\n");
 else
-prom_printf("failed: %x\n", *acknowledge);
+prom_printf("failed: %lx\n", *acknowledge);
 }
 #ifdef CONFIG_SMP
 else
-prom_printf("boot cpu hw idx %lu\n", cpu_no);
+prom_printf("boot cpu hw idx %u\n", cpu_no);
 #endif /* CONFIG_SMP */
 }
@@ -2268,7 +2270,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
 while ((*mem_start + needed) > *mem_end) {
 unsigned long room, chunk;
-prom_debug("Chunk exhausted, claiming more at %x...\n",
+prom_debug("Chunk exhausted, claiming more at %lx...\n",
 alloc_bottom);
 room = alloc_top - alloc_bottom;
 if (room > DEVTREE_CHUNK_SIZE)
@@ -2494,7 +2496,7 @@ static void __init flatten_device_tree(void)
 room = alloc_top - alloc_bottom - 0x4000;
 if (room > DEVTREE_CHUNK_SIZE)
 room = DEVTREE_CHUNK_SIZE;
-prom_debug("starting device tree allocs at %x\n", alloc_bottom);
+prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
 /* Now try to claim that */
 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
@@ -2557,7 +2559,7 @@ static void __init flatten_device_tree(void)
 int i;
 prom_printf("reserved memory map:\n");
 for (i = 0; i < mem_reserve_cnt; i++)
-prom_printf(" %x - %x\n",
+prom_printf(" %llx - %llx\n",
 be64_to_cpu(mem_reserve_map[i].base),
 be64_to_cpu(mem_reserve_map[i].size));
 }
@@ -2567,9 +2569,9 @@ static void __init flatten_device_tree(void)
 */
 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
-prom_printf("Device tree strings 0x%x -> 0x%x\n",
+prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
 dt_string_start, dt_string_end);
-prom_printf("Device tree struct 0x%x -> 0x%x\n",
+prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
 dt_struct_start, dt_struct_end);
 }
@@ -3001,7 +3003,7 @@ static void __init prom_find_boot_cpu(void)
 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 prom.cpu = be32_to_cpu(rval);
-prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
+prom_debug("Booting CPU hw index = %d\n", prom.cpu);
 }
 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
@@ -3023,8 +3025,8 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
 reserve_mem(prom_initrd_start,
 prom_initrd_end - prom_initrd_start);
-prom_debug("initrd_start=0x%x\n", prom_initrd_start);
-prom_debug("initrd_end=0x%x\n", prom_initrd_end);
+prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
+prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
 }
@@ -3277,7 +3279,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 /* Don't print anything after quiesce under OPAL, it crashes OFW */
 if (of_platform != PLATFORM_OPAL) {
 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
-prom_debug("->dt_header_start=0x%x\n", hdr);
+prom_debug("->dt_header_start=0x%lx\n", hdr);
 }
 #ifdef CONFIG_PPC32
......
@@ -433,7 +433,7 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
 /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
 return H_TOO_HARD;
-if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
+if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
 return H_HARDWARE;
 if (mm_iommu_mapped_inc(mem))
......
@@ -262,7 +262,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
 if (!mem)
 return H_TOO_HARD;
-if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
+if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+&hpa)))
 return H_HARDWARE;
 pua = (void *) vmalloc_to_phys(pua);
@@ -431,7 +432,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
 if (mem)
-prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
+IOMMU_PAGE_SHIFT_4K, &tces) == 0;
 }
 if (!prereg) {
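
Both TCE mapping paths (the virtual-mode one and the real-mode one above) now pass the table's it_page_shift down to the translation helpers, so a lookup is refused when the IOMMU page would be larger than the memory actually pinned for the region. A hedged userspace model of the new contract (illustrative names and values, not the kernel API):

#include <errno.h>
#include <stdio.h>

/* Model: a pinned region remembers the smallest backing-page shift;
 * a lookup for a larger IOMMU page must fail rather than let DMA
 * reach memory beyond the pinned page. */
struct pinned_region {
	unsigned int pageshift;   /* log2 of smallest backing page */
	unsigned long base_hpa;   /* host physical base (model)    */
};

static long ua_to_hpa_model(const struct pinned_region *mem,
			    unsigned long offset,
			    unsigned int iommu_pageshift,
			    unsigned long *hpa)
{
	if (iommu_pageshift > mem->pageshift)
		return -EFAULT;   /* e.g. 16MB TCE over 64KB-backed memory */
	*hpa = mem->base_hpa + offset;
	return 0;
}

int main(void)
{
	struct pinned_region r = { .pageshift = 16, .base_hpa = 0x100000 };
	unsigned long hpa;
	printf("64KB lookup: %ld\n", ua_to_hpa_model(&r, 0, 16, &hpa)); /* 0       */
	printf("16MB lookup: %ld\n", ua_to_hpa_model(&r, 0, 24, &hpa)); /* -EFAULT */
	return 0;
}
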
......
@@ -12,6 +12,7 @@
 #include <asm/errno.h>
 #include <asm/ppc_asm.h>
 #include <asm/export.h>
+#include <asm/cache.h>
 .text
@@ -23,7 +24,7 @@ _GLOBAL(strncpy)
 mtctr r5
 addi r6,r3,-1
 addi r4,r4,-1
-.balign 16
+.balign IFETCH_ALIGN_BYTES
 1: lbzu r0,1(r4)
 cmpwi 0,r0,0
 stbu r0,1(r6)
@@ -43,7 +44,7 @@ _GLOBAL(strncmp)
 mtctr r5
 addi r5,r3,-1
 addi r4,r4,-1
-.balign 16
+.balign IFETCH_ALIGN_BYTES
 1: lbzu r3,1(r5)
 cmpwi 1,r3,0
 lbzu r0,1(r4)
@@ -77,7 +78,7 @@ _GLOBAL(memchr)
 beq- 2f
 mtctr r5
 addi r3,r3,-1
-.balign 16
+.balign IFETCH_ALIGN_BYTES
 1: lbzu r0,1(r3)
 cmpw 0,r0,r4
 bdnzf 2,1b
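
All three .balign 16 sites become .balign IFETCH_ALIGN_BYTES, aligning each hot loop top to the CPU's instruction-fetch boundary instead of a hard-coded 16 bytes; that is why <asm/cache.h> is now included at the top of the file. As an assumption about the constant's shape (verify against asm/cache.h in this tree), it is derived from a per-CPU-family shift, roughly:

/* Assumed shape of the definition in <asm/cache.h> (illustrative):
 * a core with 16-byte fetch blocks gets 16-byte alignment, one with
 * 32-byte blocks gets 32, rather than .balign 16 for everybody. */
#define IFETCH_ALIGN_SHIFT	4	/* example value, CPU-dependent */
#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)
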
......
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
 #include <asm/mmu_context.h>
+#include <asm/pte-walk.h>
 static DEFINE_MUTEX(mem_list_mutex);
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
 struct rcu_head rcu;
 unsigned long used;
 atomic64_t mapped;
+unsigned int pageshift;
 u64 ua; /* userspace address */
 u64 entries; /* number of entries in hpas[] */
 u64 *hpas; /* vmalloc'ed */
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 {
 struct mm_iommu_table_group_mem_t *mem;
 long i, j, ret = 0, locked_entries = 0;
+unsigned int pageshift;
+unsigned long flags;
 struct page *page = NULL;
 mutex_lock(&mem_list_mutex);
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 goto unlock_exit;
 }
+/*
+ * For a starting point for a maximum page size calculation
+ * we use @ua and @entries natural alignment to allow IOMMU pages
+ * smaller than huge pages but still bigger than PAGE_SIZE.
+ */
+mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
 mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
 if (!mem->hpas) {
 kfree(mem);
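
The __ffs() starting point deserves a worked example: ua | (entries << PAGE_SHIFT) mixes the alignment of the region's start with the alignment of its size, and the lowest set bit of that value bounds the largest naturally aligned IOMMU page that could cover the region. A standalone illustration (hypothetical numbers; __builtin_ctzl plays the role of the kernel's __ffs):

#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 16;       /* 64KB base pages     */
	unsigned long ua = 0x1000000UL;           /* 16MB-aligned start  */
	unsigned long entries = 256;              /* 256 * 64KB = 16MB   */

	unsigned long v = ua | (entries << page_shift);
	printf("pageshift = %d\n", __builtin_ctzl(v)); /* 24: 16MB pages possible */

	ua = 0x1010000UL;                         /* only 64KB-aligned start */
	v = ua | (entries << page_shift);
	printf("pageshift = %d\n", __builtin_ctzl(v)); /* 16: 64KB pages only */
	return 0;
}
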
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 }
 }
 populate:
+pageshift = PAGE_SHIFT;
+if (PageCompound(page)) {
+pte_t *pte;
+struct page *head = compound_head(page);
+unsigned int compshift = compound_order(head);
+local_irq_save(flags); /* disables as well */
+pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+local_irq_restore(flags);
+/* Double check it is still the same pinned page */
+if (pte && pte_page(*pte) == head &&
+pageshift == compshift)
+pageshift = max_t(unsigned int, pageshift,
+PAGE_SHIFT);
+}
+mem->pageshift = min(mem->pageshift, pageshift);
 mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
 }
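
For compound (huge) pages the code re-walks the page table with interrupts off to confirm the PTE still maps the same pinned head page before trusting the larger mapping size; mem->pageshift then takes the minimum across all pinned pages, so a single small-mapped page caps the whole region. A toy model of the clamping (hypothetical values):

#include <stdio.h>

int main(void)
{
	unsigned int region_shift = 24;             /* from the __ffs() estimate */
	unsigned int page_shift[] = { 24, 16, 24 }; /* per-page mapping shifts   */

	for (int i = 0; i < 3; i++)
		if (page_shift[i] < region_shift)
			region_shift = page_shift[i];

	/* One 64KB-mapped page limits an otherwise 16MB-capable region. */
	printf("region pageshift = %u\n", region_shift);  /* -> 16 */
	return 0;
}
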
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 EXPORT_SYMBOL_GPL(mm_iommu_find);
 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-unsigned long ua, unsigned long *hpa)
+unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 const long entry = (ua - mem->ua) >> PAGE_SHIFT;
 u64 *va = &mem->hpas[entry];
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 if (entry >= mem->entries)
 return -EFAULT;
+if (pageshift > mem->pageshift)
+return -EFAULT;
 *hpa = *va | (ua & ~PAGE_MASK);
 return 0;
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-unsigned long ua, unsigned long *hpa)
+unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 const long entry = (ua - mem->ua) >> PAGE_SHIFT;
 void *va = &mem->hpas[entry];
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 if (entry >= mem->entries)
 return -EFAULT;
+if (pageshift > mem->pageshift)
+return -EFAULT;
 pa = (void *) vmalloc_to_phys(va);
 if (!pa)
 return -EFAULT;
......
@@ -63,14 +63,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
 * updating it. No write barriers are needed here, provided
 * we only update the current CPU's SLB shadow buffer.
 */
-p->save_area[index].esid = 0;
-p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
-p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
+WRITE_ONCE(p->save_area[index].esid, 0);
+WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
+WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
 }
 static inline void slb_shadow_clear(enum slb_index index)
 {
-get_slb_shadow()->save_area[index].esid = 0;
+WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
 }
 static inline void create_shadowed_slbe(unsigned long ea, int ssize,
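
The switch to WRITE_ONCE() is about compiler store ordering, not CPU barriers: with plain assignments the compiler may notice that save_area[index].esid is written twice and delete or reorder the first store, leaving a window where the hypervisor (which can read the shadow buffer at any instant) sees a valid esid paired with a stale vsid. A minimal model of the hazard (not the kernel source):

/* Model: with plain stores a compiler may treat the first esid store
 * as dead (it is overwritten below) and elide it, or sink it past the
 * vsid update. Making each store volatile -- which is essentially what
 * WRITE_ONCE() does -- pins all three stores in program order. */
struct slb_shadow_entry { unsigned long esid, vsid; };

static void update_entry(volatile struct slb_shadow_entry *e,
			 unsigned long esid, unsigned long vsid)
{
	e->esid = 0;       /* invalidate first: a reader must never pair */
	e->vsid = vsid;    /* ...the new vsid with the old, valid esid   */
	e->esid = esid;    /* publish the entry again                    */
}
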
......
@@ -202,25 +202,37 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
 {
+unsigned int i, ctx_idx = ctx->idx;
+/* Load function address into r12 */
+PPC_LI64(12, func);
+/* For bpf-to-bpf function calls, the callee's address is unknown
+ * until the last extra pass. As seen above, we use PPC_LI64() to
+ * load the callee's address, but this may optimize the number of
+ * instructions required based on the nature of the address.
+ *
+ * Since we don't want the number of instructions emitted to change,
+ * we pad the optimized PPC_LI64() call with NOPs to guarantee that
+ * we always have a five-instruction sequence, which is the maximum
+ * that PPC_LI64() can emit.
+ */
+for (i = ctx->idx - ctx_idx; i < 5; i++)
+PPC_NOP();
 #ifdef PPC64_ELF_ABI_v1
-/* func points to the function descriptor */
-PPC_LI64(b2p[TMP_REG_2], func);
-/* Load actual entry point from function descriptor */
-PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
-/* ... and move it to LR */
-PPC_MTLR(b2p[TMP_REG_1]);
 /*
 * Load TOC from function descriptor at offset 8.
 * We can clobber r2 since we get called through a
 * function pointer (so caller will save/restore r2)
 * and since we don't use a TOC ourself.
 */
-PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
-#else
-/* We can clobber r12 */
-PPC_FUNC_ADDR(12, func);
-PPC_MTLR(12);
+PPC_BPF_LL(2, 12, 8);
+/* Load actual entry point from function descriptor */
+PPC_BPF_LL(12, 12, 0);
 #endif
+PPC_MTLR(12);
 PPC_BLRL();
 }
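
The padding loop exists because PPC_LI64() emits a variable-length sequence: a small immediate loads in one or two instructions while an arbitrary 64-bit address needs five. Since bpf-to-bpf callee addresses are only final in the last extra pass, call sites that changed size between passes would invalidate previously computed image offsets. A toy model of the invariant (the instruction-count thresholds here are illustrative, not the macro's real logic):

#include <stdio.h>

static unsigned int idx;  /* models ctx->idx, the emitted-instruction counter */

static void emit_li64_model(unsigned long long imm)
{
	if (imm <= 0x7fffULL)
		idx += 1;          /* li                            */
	else if (imm <= 0x7fffffffULL)
		idx += 2;          /* lis + ori                     */
	else
		idx += 5;          /* lis + ori + sldi + oris + ori */
}

int main(void)
{
	unsigned int ctx_idx = idx;
	emit_li64_model(0x1234);                  /* only 1 instruction    */
	for (unsigned int i = idx - ctx_idx; i < 5; i++)
		idx++;                            /* PPC_NOP() padding     */
	printf("call site = %u insns\n", idx - ctx_idx);  /* always 5     */
	return 0;
}
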
......
@@ -28,6 +28,8 @@
 #include <asm/sections.h>
 #include <asm/time.h>
+#include <platforms/chrp/chrp.h>
 extern spinlock_t rtc_lock;
 #define NVRAM_AS0 0x74
@@ -63,7 +65,7 @@ long __init chrp_time_init(void)
 return 0;
 }
-int chrp_cmos_clock_read(int addr)
+static int chrp_cmos_clock_read(int addr)
 {
 if (nvram_as1 != 0)
 outb(addr>>8, nvram_as1);
@@ -71,7 +73,7 @@ int chrp_cmos_clock_read(int addr)
 return (inb(nvram_data));
 }
-void chrp_cmos_clock_write(unsigned long val, int addr)
+static void chrp_cmos_clock_write(unsigned long val, int addr)
 {
 if (nvram_as1 != 0)
 outb(addr>>8, nvram_as1);
......
@@ -35,6 +35,8 @@
 */
 #define HW_BROADWAY_ICR 0x00
 #define HW_BROADWAY_IMR 0x04
+#define HW_STARLET_ICR 0x08
+#define HW_STARLET_IMR 0x0c
 /*
@@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d)
 void __iomem *io_base = irq_data_get_irq_chip_data(d);
 setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
+/* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */
+clrbits32(io_base + HW_STARLET_IMR, 1 << irq);
 }
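
On the Wii, the Hollywood PIC is shared between the PowerPC core (Broadway) and the ARM coprocessor (Starlet), whose mask/status pair sits at the newly defined 0x08/0x0c offsets; unmasking a line for Linux therefore also masks it on the Starlet side so the other core cannot claim the interrupt first. A self-contained toy model of that register dance (offsets from the hunk above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define HW_BROADWAY_IMR 0x04
#define HW_STARLET_IMR  0x0c

static uint32_t regs[0x10 / 4];   /* fake register file */

static void unmask_for_linux(int irq)
{
	regs[HW_BROADWAY_IMR / 4] |=  (1u << irq);  /* setbits32 equivalent */
	regs[HW_STARLET_IMR  / 4] &= ~(1u << irq);  /* clrbits32 equivalent */
}

int main(void)
{
	regs[HW_STARLET_IMR / 4] = ~0u;  /* pretend Starlet owned every line */
	unmask_for_linux(5);
	printf("broadway=%08x starlet=%08x\n",
	       regs[HW_BROADWAY_IMR / 4], regs[HW_STARLET_IMR / 4]);
	return 0;
}
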
......
@@ -468,7 +468,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
 boot_infos_t *bi = (boot_infos_t *) r4;
 unsigned long hdr;
 unsigned long space;
-unsigned long ptr, x;
+unsigned long ptr;
 char *model;
 unsigned long offset = reloc_offset();
@@ -562,6 +562,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
 * MMU switched OFF, so this should not be useful anymore.
 */
 if (bi->version < 4) {
+unsigned long x __maybe_unused;
 bootx_printf("Touching pages...\n");
 /*
......
@@ -352,6 +352,7 @@ static int pmac_late_init(void)
 }