Commit 08cab72f authored by Arnd Bergmann

Merge branch 'dt/gic' into next/dt

Conflicts:
	arch/arm/include/asm/localtimer.h
	arch/arm/mach-msm/board-msm8x60.c
	arch/arm/mach-omap2/board-generic.c
parents 86c1e5a7 f37a53cc
Showing changed files with 916 additions and 135 deletions
* ARM Generic Interrupt Controller
ARM SMP cores are often associated with a GIC, providing per processor
interrupts (PPI), shared peripheral interrupts (SPI) and software
generated interrupts (SGI).
The primary GIC is attached directly to the CPU and typically has PPIs and SGIs.
Secondary GICs are cascaded into an upstream interrupt controller and do not
have PPIs or SGIs.
Main node required properties:
- compatible : should be one of:
"arm,cortex-a9-gic"
"arm,arm11mp-gic"
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The type shall be a <u32> and the value shall be 3.
The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
interrupts.
The 2nd cell contains the interrupt number for the interrupt type.
SPI interrupts are in the range [0-987]. PPI interrupts are in the
range [0-15].
The 3rd cell is the flags, encoded as follows:
bits[3:0] trigger type and level flags.
1 = low-to-high edge triggered
2 = high-to-low edge triggered
4 = active high level-sensitive
8 = active low level-sensitive
bits[15:8] PPI interrupt cpu mask. Each bit corresponds to one of
the 8 possible cpus attached to the GIC. A bit set to '1' indicates
the interrupt is wired to that CPU. Only valid for PPI interrupts
(see the consumer example below).
- reg : Specifies base physical address(es) and size of the GIC registers. The
first region is the GIC distributor register base and size. The 2nd region is
the GIC cpu interface register base and size.
Optional
- interrupts : Interrupt source of the parent interrupt controller. Only
present on secondary GICs.
Example:
intc: interrupt-controller@fff11000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
#address-cells = <1>;
interrupt-controller;
reg = <0xfff11000 0x1000>,
<0xfff10100 0x100>;
};
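For illustration only, here is a hypothetical consumer of the three-cell specifier described above (the device names, register addresses and interrupt numbers are assumptions, not part of this binding): an SPI of type 0, number 34, active high level-sensitive, and a PPI of type 1, number 13, routed to CPU0 via the cpu mask in bits [15:8] of the flags cell.

	uart@101f1000 {
		compatible = "example,uart";		/* hypothetical device */
		reg = <0x101f1000 0x1000>;
		interrupt-parent = <&intc>;
		interrupts = <0 34 4>;			/* SPI 34, active high level-sensitive */
	};

	local-timer@fff10600 {
		compatible = "example,local-timer";	/* hypothetical device */
		reg = <0xfff10600 0x20>;
		interrupt-parent = <&intc>;
		interrupts = <1 13 0x104>;		/* PPI 13, CPU0 only, active high level-sensitive */
	};

With the irq_domain translation added later in this commit, the SPI example maps to GIC hwirq 66 (34 + 16 + 16) and the PPI example to hwirq 29 (13 + 16).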
......@@ -29,6 +29,7 @@ config ARM
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select GENERIC_IRQ_SHOW
select CPU_PM if (SUSPEND || CPU_IDLE)
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
......@@ -195,7 +196,8 @@ config VECTORS_BASE
The base address of exception vectors.
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime"
bool "Patch physical to virtual translations at runtime" if EMBEDDED
default y
depends on !XIP_KERNEL && MMU
depends on !ARCH_REALVIEW || !SPARSEMEM
help
......@@ -204,16 +206,25 @@ config ARM_PATCH_PHYS_VIRT
kernel in system memory.
This can only be used with non-XIP MMU kernels where the base
of physical memory is at a 16MB boundary, or theoretically 64K
for the MSM machine class.
of physical memory is at a 16MB boundary.
config ARM_PATCH_PHYS_VIRT_16BIT
def_bool y
depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
Only disable this option if you know that you do not require
this feature (eg, building a kernel for a single machine) and
you need to shrink the kernel to the minimal size.
config NEED_MACH_MEMORY_H
bool
help
This option extends the physical to virtual translation patching
to allow physical memory down to a theoretical minimum of 64K
boundaries.
Select this when mach/memory.h is required to provide special
definitions for this platform. The need for mach/memory.h should
be avoided when possible.
config PHYS_OFFSET
hex "Physical address of main memory"
depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
help
Please provide the physical address corresponding to the
location of main memory in your system.
source "init/Kconfig"
......@@ -246,6 +257,7 @@ config ARCH_INTEGRATOR
select GENERIC_CLOCKEVENTS
select PLAT_VERSATILE
select PLAT_VERSATILE_FPGA_IRQ
select NEED_MACH_MEMORY_H
help
Support for ARM's Integrator platform.
......@@ -261,6 +273,7 @@ config ARCH_REALVIEW
select PLAT_VERSATILE_CLCD
select ARM_TIMER_SP804
select GPIO_PL061 if GPIOLIB
select NEED_MACH_MEMORY_H
help
This enables support for ARM Ltd RealView boards.
......@@ -301,7 +314,6 @@ config ARCH_AT91
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
select CLKDEV_LOOKUP
select ARM_PATCH_PHYS_VIRT if MMU
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
......@@ -322,6 +334,7 @@ config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x-based"
select CPU_ARM720T
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help
Support for Cirrus Logic 711x/721x based boards.
......@@ -362,6 +375,7 @@ config ARCH_EBSA110
select ISA
select NO_IOPORT
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help
This is an evaluation board for the StrongARM processor available
from Digital. It has limited hardware on-board, including an
......@@ -377,6 +391,7 @@ config ARCH_EP93XX
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help
This enables support for the Cirrus EP93xx series of CPUs.
......@@ -385,6 +400,7 @@ config ARCH_FOOTBRIDGE
select CPU_SA110
select FOOTBRIDGE
select GENERIC_CLOCKEVENTS
select NEED_MACH_MEMORY_H
help
Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
......@@ -434,6 +450,7 @@ config ARCH_IOP13XX
select PCI
select ARCH_SUPPORTS_MSI
select VMSPLIT_1G
select NEED_MACH_MEMORY_H
help
Support for Intel's IOP13XX (XScale) family of processors.
......@@ -464,6 +481,7 @@ config ARCH_IXP23XX
select CPU_XSC3
select PCI
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help
Support for Intel's IXP23xx (XScale) family of processors.
......@@ -473,6 +491,7 @@ config ARCH_IXP2000
select CPU_XSCALE
select PCI
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help
Support for Intel's IXP2400/2800 (XScale) family of processors.
......@@ -566,6 +585,7 @@ config ARCH_KS8695
select CPU_ARM922T
select ARCH_REQUIRE_GPIOLIB
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help
Support for Micrel/Kendin KS8695 "Centaur" (ARM922T) based
System-on-Chip devices.
......@@ -657,6 +677,7 @@ config ARCH_SHMOBILE
select SPARSE_IRQ
select MULTI_IRQ_HANDLER
select PM_GENERIC_DOMAINS if PM
select NEED_MACH_MEMORY_H
help
Support for Renesas's SH-Mobile and R-Mobile ARM platforms.
......@@ -671,6 +692,7 @@ config ARCH_RPC
select NO_IOPORT
select ARCH_SPARSEMEM_ENABLE
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive.
......@@ -689,6 +711,7 @@ config ARCH_SA1100
select HAVE_SCHED_CLOCK
select TICK_ONESHOT
select ARCH_REQUIRE_GPIOLIB
select NEED_MACH_MEMORY_H
help
Support for StrongARM 11x0 based boards.
......@@ -781,6 +804,7 @@ config ARCH_S5PV210
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select NEED_MACH_MEMORY_H
help
Samsung S5PV210/S5PC110 series based systems
......@@ -797,6 +821,7 @@ config ARCH_EXYNOS4
select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select NEED_MACH_MEMORY_H
help
Samsung EXYNOS4 series based systems
......@@ -808,6 +833,7 @@ config ARCH_SHARK
select ZONE_DMA
select PCI
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
help
Support for the StrongARM based Digital DNARD machine, also known
as "Shark" (<http://www.shark-linux.de/shark.html>).
......@@ -835,6 +861,7 @@ config ARCH_U300
select CLKDEV_LOOKUP
select HAVE_MACH_CLKDEV
select GENERIC_GPIO
select NEED_MACH_MEMORY_H
help
Support for ST-Ericsson U300 series mobile platforms.
......@@ -1407,6 +1434,31 @@ config SMP_ON_UP
If you don't know what to do here, say Y.
config ARM_CPU_TOPOLOGY
bool "Support cpu topology definition"
depends on SMP && CPU_V7
default y
help
Support ARM cpu topology definition. The MPIDR register defines
affinity between processors, which is then used to describe the cpu
topology of an ARM system.
config SCHED_MC
bool "Multi-core scheduler support"
depends on ARM_CPU_TOPOLOGY
help
Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
config SCHED_SMT
bool "SMT scheduler support"
depends on ARM_CPU_TOPOLOGY
help
Improves the CPU scheduler's decision making when dealing with
MultiThreading at a cost of slightly increased overhead in some
places. If unsure say N here.
config HAVE_ARM_SCU
bool
help
......@@ -1807,6 +1859,38 @@ config ZBOOT_ROM_SH_MOBILE_SDHI
endchoice
config ARM_APPENDED_DTB
bool "Use appended device tree blob to zImage (EXPERIMENTAL)"
depends on OF && !ZBOOT_ROM && EXPERIMENTAL
help
With this option, the boot code will look for a device tree binary
(DTB) appended to zImage
(e.g. cat zImage <filename>.dtb > zImage_w_dtb).
This is meant as a backward compatibility convenience for those
systems with a bootloader that can't be upgraded to accommodate
the documented boot protocol using a device tree.
Beware that there is very little in terms of protection against
this option being confused by leftover garbage in memory that might
look like a DTB header after a reboot if no actual DTB is appended
to zImage. Do not leave this option active in a production kernel
if you don't intend to always append a DTB. Proper passing of the
location into r2 of a bootloader provided DTB is always preferable
to this option.
config ARM_ATAG_DTB_COMPAT
bool "Supplement the appended DTB with traditional ATAG information"
depends on ARM_APPENDED_DTB
help
Some old bootloaders can't be updated to a DTB capable one, yet
they provide ATAGs with memory configuration, the ramdisk address,
the kernel cmdline string, etc. Such information is dynamically
provided by the bootloader and can't always be stored in a static
DTB. To allow a device tree enabled kernel to be used with such
bootloaders, this option allows zImage to extract the information
from the ATAG list and store it at run time into the appended DTB.
config CMDLINE
string "Default kernel command string"
default ""
......
......@@ -129,4 +129,10 @@ config DEBUG_S3C_UART
The uncompressor code port configuration is now handled
by CONFIG_S3C_LOWLEVEL_UART_PORT.
config ARM_KPROBES_TEST
tristate "Kprobes test module"
depends on KPROBES && MODULES
help
Perform tests of kprobes API and instruction set simulation.
endmenu
......@@ -128,6 +128,9 @@ textofs-$(CONFIG_PM_H1940) := 0x00108000
ifeq ($(CONFIG_ARCH_SA1100),y)
textofs-$(CONFIG_SA1111) := 0x00208000
endif
textofs-$(CONFIG_ARCH_MSM7X30) := 0x00208000
textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000
textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
# Machine directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
......
......@@ -5,3 +5,12 @@ piggy.lzo
piggy.lzma
vmlinux
vmlinux.lds
# borrowed libfdt files
fdt.c
fdt.h
fdt_ro.c
fdt_rw.c
fdt_wip.c
libfdt.h
libfdt_internal.h
......@@ -26,6 +26,10 @@ HEAD = head.o
OBJS += misc.o decompress.o
FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
# string library code (-Os is enforced to keep it much smaller)
OBJS += string.o
CFLAGS_string.o := -Os
#
# Architecture dependencies
#
......@@ -89,21 +93,41 @@ suffix_$(CONFIG_KERNEL_GZIP) = gzip
suffix_$(CONFIG_KERNEL_LZO) = lzo
suffix_$(CONFIG_KERNEL_LZMA) = lzma
# Borrowed libfdt files for the ATAG compatibility mode
libfdt := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c
libfdt_hdrs := fdt.h libfdt.h libfdt_internal.h
libfdt_objs := $(addsuffix .o, $(basename $(libfdt)))
$(addprefix $(obj)/,$(libfdt) $(libfdt_hdrs)): $(obj)/%: $(srctree)/scripts/dtc/libfdt/%
$(call cmd,shipped)
$(addprefix $(obj)/,$(libfdt_objs) atags_to_fdt.o): \
$(addprefix $(obj)/,$(libfdt_hdrs))
ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
OBJS += $(libfdt_objs) atags_to_fdt.o
endif
targets := vmlinux vmlinux.lds \
piggy.$(suffix_y) piggy.$(suffix_y).o \
font.o font.c head.o misc.o $(OBJS)
lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS)
# Make sure files are removed during clean
extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S
extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
ccflags-y := -fpic -fno-builtin
ccflags-y := -fpic -fno-builtin -I$(obj)
asflags-y := -Wa,-march=all
# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell size $(obj)/../../../../vmlinux | awk 'END{print $$3}')
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
......@@ -123,7 +147,7 @@ LDFLAGS_vmlinux += -T
# For __aeabi_uidivmod
lib1funcs = $(obj)/lib1funcs.o
$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE
$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S
$(call cmd,shipped)
# We need to prevent any GOTOFF relocs being used with references
......
#include <asm/setup.h>
#include <libfdt.h>
static int node_offset(void *fdt, const char *node_path)
{
int offset = fdt_path_offset(fdt, node_path);
if (offset == -FDT_ERR_NOTFOUND)
offset = fdt_add_subnode(fdt, 0, node_path);
return offset;
}
static int setprop(void *fdt, const char *node_path, const char *property,
uint32_t *val_array, int size)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
return offset;
return fdt_setprop(fdt, offset, property, val_array, size);
}
static int setprop_string(void *fdt, const char *node_path,
const char *property, const char *string)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
return offset;
return fdt_setprop_string(fdt, offset, property, string);
}
static int setprop_cell(void *fdt, const char *node_path,
const char *property, uint32_t val)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
return offset;
return fdt_setprop_cell(fdt, offset, property, val);
}
/*
* Convert and fold provided ATAGs into the provided FDT.
*
* Return values:
* = 0 -> pretend success
* = 1 -> bad ATAG (may retry with another possible ATAG pointer)
* < 0 -> error from libfdt
*/
int atags_to_fdt(void *atag_list, void *fdt, int total_space)
{
struct tag *atag = atag_list;
uint32_t mem_reg_property[2 * NR_BANKS];
int memcount = 0;
int ret;
/* make sure we've got an aligned pointer */
if ((u32)atag_list & 0x3)
return 1;
/* if we get a DTB here we're done already */
if (*(u32 *)atag_list == fdt32_to_cpu(FDT_MAGIC))
return 0;
/* validate the ATAG */
if (atag->hdr.tag != ATAG_CORE ||
(atag->hdr.size != tag_size(tag_core) &&
atag->hdr.size != 2))
return 1;
/* let's give it all the room it could need */
ret = fdt_open_into(fdt, fdt, total_space);
if (ret < 0)
return ret;
for_each_tag(atag, atag_list) {
if (atag->hdr.tag == ATAG_CMDLINE) {
setprop_string(fdt, "/chosen", "bootargs",
atag->u.cmdline.cmdline);
} else if (atag->hdr.tag == ATAG_MEM) {
if (memcount >= sizeof(mem_reg_property)/4)
continue;
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
} else if (atag->hdr.tag == ATAG_INITRD2) {
uint32_t initrd_start, initrd_size;
initrd_start = atag->u.initrd.start;
initrd_size = atag->u.initrd.size;
setprop_cell(fdt, "/chosen", "linux,initrd-start",
initrd_start);
setprop_cell(fdt, "/chosen", "linux,initrd-end",
initrd_start + initrd_size);
}
}
if (memcount)
setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount);
return fdt_pack(fdt);
}
......@@ -216,6 +216,103 @@ restart: adr r0, LC0
mov r10, r6
#endif
mov r5, #0 @ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
* r0 = delta
* r2 = BSS start
* r3 = BSS end
* r4 = final kernel address
* r5 = appended dtb size (still unknown)
* r6 = _edata
* r7 = architecture ID
* r8 = atags/device tree pointer
* r9 = size of decompressed image
* r10 = end of this image, including bss/stack/malloc space if non XIP
* r11 = GOT start
* r12 = GOT end
* sp = stack pointer
*
* if there are device trees (dtb) appended to zImage, advance r10 so that the
* dtb data will get relocated along with the kernel if necessary.
*/
ldr lr, [r6, #0]
#ifndef __ARMEB__
ldr r1, =0xedfe0dd0 @ sig is 0xd00dfeed big endian
#else
ldr r1, =0xd00dfeed
#endif
cmp lr, r1
bne dtb_check_done @ not found
#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
/*
* OK... Let's do some funky business here.
* If we do have a DTB appended to zImage, and we do have
* an ATAG list around, we want the latter to be translated
* and folded into the former here. To be on the safe side,
* let's temporarily move the stack away into the malloc
* area. No GOT fixup has occurred yet, but none of the
* code we're about to call uses any global variable.
*/
add sp, sp, #0x10000
stmfd sp!, {r0-r3, ip, lr}
mov r0, r8
mov r1, r6
sub r2, sp, r6
bl atags_to_fdt
/*
* If the returned value is 1, there is no ATAG at the location
* pointed to by r8. Try the typical 0x100 offset from the start
* of RAM and hope for the best.
*/
cmp r0, #1
sub r0, r4, #(TEXT_OFFSET - 0x100)
mov r1, r6
sub r2, sp, r6
blne atags_to_fdt
ldmfd sp!, {r0-r3, ip, lr}
sub sp, sp, #0x10000
#endif
mov r8, r6 @ use the appended device tree
/*
* Make sure that the DTB doesn't end up in the final
* kernel's .bss area. To do so, we adjust the decompressed
* kernel size to compensate if that .bss size is larger
* than the relocated code.
*/
ldr r5, =_kernel_bss_size
adr r1, wont_overwrite
sub r1, r6, r1
subs r1, r5, r1
addhi r9, r9, r1
/* Get the dtb's size */
ldr r5, [r6, #4]
#ifndef __ARMEB__
/* convert r5 (dtb size) to little endian */
eor r1, r5, r5, ror #16
bic r1, r1, #0x00ff0000
mov r5, r5, ror #8
eor r5, r5, r1, lsr #8
#endif
/* preserve 64-bit alignment */
add r5, r5, #7
bic r5, r5, #7
/* relocate some pointers past the appended dtb */
add r6, r6, r5
add r10, r10, r5
add sp, sp, r5
dtb_check_done:
#endif
/*
* Check to see if we will overwrite ourselves.
* r4 = final kernel address
......@@ -223,15 +320,14 @@ restart: adr r0, LC0
* r10 = end of this image, including bss/stack/malloc space if non XIP
* We basically want:
* r4 - 16k page directory >= r10 -> OK
* r4 + image length <= current position (pc) -> OK
* r4 + image length <= address of wont_overwrite -> OK
*/
add r10, r10, #16384
cmp r4, r10
bhs wont_overwrite
add r10, r4, r9
ARM( cmp r10, pc )
THUMB( mov lr, pc )
THUMB( cmp r10, lr )
adr r9, wont_overwrite
cmp r10, r9
bls wont_overwrite
/*
......@@ -285,14 +381,16 @@ wont_overwrite:
* r2 = BSS start
* r3 = BSS end
* r4 = kernel execution address
* r5 = appended dtb size (0 if not present)
* r7 = architecture ID
* r8 = atags pointer
* r11 = GOT start
* r12 = GOT end
* sp = stack pointer
*/
teq r0, #0
orrs r1, r0, r5
beq not_relocated
add r11, r11, r0
add r12, r12, r0
......@@ -307,12 +405,21 @@ wont_overwrite:
/*
* Relocate all entries in the GOT table.
* Bump bss entries to _edata + dtb size
*/
1: ldr r1, [r11, #0] @ relocate entries in the GOT
add r1, r1, r0 @ table. This fixes up the
str r1, [r11], #4 @ C references.
add r1, r1, r0 @ This fixes up C references
cmp r1, r2 @ if entry >= bss_start &&
cmphs r3, r1 @ bss_end > entry
addhi r1, r1, r5 @ entry += dtb size
str r1, [r11], #4 @ next entry
cmp r11, r12
blo 1b
/* bump our bss pointers too */
add r2, r2, r5
add r3, r3, r5
#else
/*
......
#ifndef _ARM_LIBFDT_ENV_H
#define _ARM_LIBFDT_ENV_H
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#define fdt16_to_cpu(x) be16_to_cpu(x)
#define cpu_to_fdt16(x) cpu_to_be16(x)
#define fdt32_to_cpu(x) be32_to_cpu(x)
#define cpu_to_fdt32(x) cpu_to_be32(x)
#define fdt64_to_cpu(x) be64_to_cpu(x)
#define cpu_to_fdt64(x) cpu_to_be64(x)
#endif
......@@ -18,14 +18,9 @@
unsigned int __machine_arch_type;
#define _LINUX_STRING_H_
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
#include <linux/types.h>
#include <linux/linkage.h>
#include <asm/string.h>
static void putstr(const char *ptr);
extern void error(char *x);
......@@ -101,41 +96,6 @@ static void putstr(const char *ptr)
flush();
}
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
int i = 0;
unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
for (i = __n >> 3; i > 0; i--) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 2) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 1) {
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1)
*d++ = *s++;
return __dest;
}
/*
* gzip declarations
*/
......
/*
* arch/arm/boot/compressed/string.c
*
* Small subset of simple string routines
*/
#include <linux/string.h>
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
int i = 0;
unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
for (i = __n >> 3; i > 0; i--) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 2) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 1) {
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1)
*d++ = *s++;
return __dest;
}
void *memmove(void *__dest, __const void *__src, size_t count)
{
unsigned char *d = __dest;
const unsigned char *s = __src;
if (__dest == __src)
return __dest;
if (__dest < __src)
return memcpy(__dest, __src, count);
while (count--)
d[count] = s[count];
return __dest;
}
size_t strlen(const char *s)
{
const char *sc = s;
while (*sc != '\0')
sc++;
return sc - s;
}
int memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1 = cs, *su2 = ct, *end = su1 + count;
int res = 0;
while (su1 < end) {
res = *su1++ - *su2++;
if (res)
break;
}
return res;
}
int strcmp(const char *cs, const char *ct)
{
unsigned char c1, c2;
int res = 0;
do {
c1 = *cs++;
c2 = *ct++;
res = c1 - c2;
if (res)
break;
} while (c1);
return res;
}
void *memchr(const void *s, int c, size_t count)
{
const unsigned char *p = s;
while (count--)
if ((unsigned char)c == *p++)
return (void *)(p - 1);
return NULL;
}
char *strchr(const char *s, int c)
{
while (*s != (char)c)
if (*s++ == '\0')
return NULL;
return (char *)s;
}
#undef memset
void *memset(void *s, int c, size_t count)
{
char *xs = s;
while (count--)
*xs++ = c;
return s;
}
void __memzero(void *s, size_t count)
{
memset(s, 0, count);
}
......@@ -51,6 +51,10 @@ SECTIONS
_got_start = .;
.got : { *(.got) }
_got_end = .;
/* ensure the zImage file size is always a multiple of 64 bits */
/* (without a dummy byte, ld just ignores the empty section) */
.pad : { BYTE(0); . = ALIGN(8); }
_edata = .;
. = BSS_START;
......
config ARM_GIC
select IRQ_DOMAIN
bool
config ARM_VIC
......
......@@ -24,10 +24,20 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
......@@ -71,8 +81,7 @@ static inline void __iomem *gic_cpu_base(struct irq_data *d)
static inline unsigned int gic_irq(struct irq_data *d)
{
struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
return d->irq - gic_data->irq_offset;
return d->hwirq;
}
/*
......@@ -80,7 +89,7 @@ static inline unsigned int gic_irq(struct irq_data *d)
*/
static void gic_mask_irq(struct irq_data *d)
{
u32 mask = 1 << (d->irq % 32);
u32 mask = 1 << (gic_irq(d) % 32);
spin_lock(&irq_controller_lock);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
......@@ -91,7 +100,7 @@ static void gic_mask_irq(struct irq_data *d)
static void gic_unmask_irq(struct irq_data *d)
{
u32 mask = 1 << (d->irq % 32);
u32 mask = 1 << (gic_irq(d) % 32);
spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_unmask)
......@@ -172,7 +181,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
unsigned int shift = (d->irq % 4) * 8;
unsigned int shift = (gic_irq(d) % 4) * 8;
unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
u32 val, mask, bit;
......@@ -180,7 +189,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return -EINVAL;
mask = 0xff << shift;
bit = 1 << (cpu + shift);
bit = 1 << (cpu_logical_map(cpu) + shift);
spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
......@@ -223,7 +232,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
if (gic_irq == 1023)
goto out;
cascade_irq = gic_irq + chip_data->irq_offset;
cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
do_bad_IRQ(cascade_irq, desc);
else
......@@ -255,27 +264,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
static void __init gic_dist_init(struct gic_chip_data *gic,
unsigned int irq_start)
static void __init gic_dist_init(struct gic_chip_data *gic)
{
unsigned int gic_irqs, irq_limit, i;
unsigned int i, irq;
u32 cpumask;
unsigned int gic_irqs = gic->gic_irqs;
struct irq_domain *domain = &gic->domain;
void __iomem *base = gic->dist_base;
u32 cpumask = 1 << smp_processor_id();
u32 cpu = 0;
#ifdef CONFIG_SMP
cpu = cpu_logical_map(smp_processor_id());
#endif
cpumask = 1 << cpu;
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
writel_relaxed(0, base + GIC_DIST_CTRL);
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources.
*/
gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
gic_irqs = (gic_irqs + 1) * 32;
if (gic_irqs > 1020)
gic_irqs = 1020;
/*
* Set all global interrupts to be level triggered, active low.
*/
......@@ -301,20 +308,21 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
for (i = 32; i < gic_irqs; i += 32)
writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
/*
* Limit number of interrupts registered to the platform maximum
*/
irq_limit = gic->irq_offset + gic_irqs;
if (WARN_ON(irq_limit > NR_IRQS))
irq_limit = NR_IRQS;
/*
* Setup the Linux IRQ subsystem.
*/
for (i = irq_start; i < irq_limit; i++) {
irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
irq_set_chip_data(i, gic);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
irq_domain_for_each_irq(domain, i, irq) {
if (i < 32) {
irq_set_percpu_devid(irq);
irq_set_chip_and_handler(irq, &gic_chip,
handle_percpu_devid_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
} else {
irq_set_chip_and_handler(irq, &gic_chip,
handle_fasteoi_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
irq_set_chip_data(irq, gic);
}
writel_relaxed(1, base + GIC_DIST_CTRL);
......@@ -343,23 +351,270 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(1, base + GIC_CPU_CTRL);
}
void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
#ifdef CONFIG_CPU_PM
/*
* Saves the GIC distributor registers during suspend or idle. Must be called
* with interrupts disabled but before powering down the GIC. After calling
* this function, no interrupts will be delivered by the GIC, and another
* platform-specific wakeup source must be enabled.
*/
static void gic_dist_save(unsigned int gic_nr)
{
unsigned int gic_irqs;
void __iomem *dist_base;
int i;
if (gic_nr >= MAX_GIC_NR)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
dist_base = gic_data[gic_nr].dist_base;
if (!dist_base)
return;
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
gic_data[gic_nr].saved_spi_conf[i] =
readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
gic_data[gic_nr].saved_spi_target[i] =
readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
gic_data[gic_nr].saved_spi_enable[i] =
readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
/*
* Restores the GIC distributor registers during resume or when coming out of
* idle. Must be called before enabling interrupts. If a level interrupt
* that occurred while the GIC was suspended is still present, it will be
* handled normally, but any edge interrupts that occurred will not be seen by
* the GIC and need to be handled by the platform-specific wakeup source.
*/
static void gic_dist_restore(unsigned int gic_nr)
{
unsigned int gic_irqs;
unsigned int i;
void __iomem *dist_base;
if (gic_nr >= MAX_GIC_NR)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
dist_base = gic_data[gic_nr].dist_base;
if (!dist_base)
return;
writel_relaxed(0, dist_base + GIC_DIST_CTRL);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
writel_relaxed(0xa0a0a0a0,
dist_base + GIC_DIST_PRI + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
dist_base + GIC_DIST_ENABLE_SET + i * 4);
writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
static void gic_cpu_save(unsigned int gic_nr)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
if (gic_nr >= MAX_GIC_NR)
BUG();
dist_base = gic_data[gic_nr].dist_base;
cpu_base = gic_data[gic_nr].cpu_base;
if (!dist_base || !cpu_base)
return;
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
static void gic_cpu_restore(unsigned int gic_nr)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
if (gic_nr >= MAX_GIC_NR)
BUG();
dist_base = gic_data[gic_nr].dist_base;
cpu_base = gic_data[gic_nr].cpu_base;
if (!dist_base || !cpu_base)
return;
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
int i;
for (i = 0; i < MAX_GIC_NR; i++) {
switch (cmd) {
case CPU_PM_ENTER:
gic_cpu_save(i);
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
gic_cpu_restore(i);
break;
case CPU_CLUSTER_PM_ENTER:
gic_dist_save(i);
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
gic_dist_restore(i);
break;
}
}
return NOTIFY_OK;
}
static struct notifier_block gic_notifier_block = {
.notifier_call = gic_notifier,
};
static void __init gic_pm_init(struct gic_chip_data *gic)
{
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
BUG_ON(!gic->saved_ppi_enable);
gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
sizeof(u32));
BUG_ON(!gic->saved_ppi_conf);
cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
#ifdef CONFIG_OF
static int gic_irq_domain_dt_translate(struct irq_domain *d,
struct device_node *controller,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
if (d->of_node != controller)
return -EINVAL;
if (intsize < 3)
return -EINVAL;
/* Get the interrupt number and add 16 to skip over SGIs */
*out_hwirq = intspec[1] + 16;
/* For SPIs, we need to add 16 more to get the GIC irq ID number */
if (!intspec[0])
*out_hwirq += 16;
*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
return 0;
}
#endif
const struct irq_domain_ops gic_irq_domain_ops = {
#ifdef CONFIG_OF
.dt_translate = gic_irq_domain_dt_translate,
#endif
};
void __init gic_init(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base)
{
struct gic_chip_data *gic;
struct irq_domain *domain;
int gic_irqs;
BUG_ON(gic_nr >= MAX_GIC_NR);
gic = &gic_data[gic_nr];
domain = &gic->domain;
gic->dist_base = dist_base;
gic->cpu_base = cpu_base;
gic->irq_offset = (irq_start - 1) & ~31;
if (gic_nr == 0)
/*
* For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too.
*/
if (gic_nr == 0) {
gic_cpu_base_addr = cpu_base;
domain->hwirq_base = 16;
if (irq_start > 0)
irq_start = (irq_start & ~31) + 16;
} else
domain->hwirq_base = 32;
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources.
*/
gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
gic_irqs = (gic_irqs + 1) * 32;
if (gic_irqs > 1020)
gic_irqs = 1020;
gic->gic_irqs = gic_irqs;
domain->nr_irq = gic_irqs - domain->hwirq_base;
domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq,
numa_node_id());
if (IS_ERR_VALUE(domain->irq_base)) {
WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
irq_start);
domain->irq_base = irq_start;
}
domain->priv = gic;
domain->ops = &gic_irq_domain_ops;
irq_domain_add(domain);
gic_dist_init(gic, irq_start);
gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic);
gic_cpu_init(gic);
gic_pm_init(gic);
}
void __cpuinit gic_secondary_init(unsigned int gic_nr)
......@@ -369,20 +624,15 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr)
gic_cpu_init(&gic_data[gic_nr]);
}
void __cpuinit gic_enable_ppi(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
irq_set_status_flags(irq, IRQ_NOPROBE);
gic_unmask_irq(irq_get_irq_data(irq));
local_irq_restore(flags);
}
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
unsigned long map = *cpus_addr(*mask);
int cpu;
unsigned long map = 0;
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
map |= 1 << cpu_logical_map(cpu);
/*
* Ensure that stores to Normal memory are visible to the
......@@ -394,3 +644,35 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif
#ifdef CONFIG_OF
static int gic_cnt __initdata = 0;
int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *cpu_base;
void __iomem *dist_base;
int irq;
struct irq_domain *domain = &gic_data[gic_cnt].domain;
if (WARN_ON(!node))
return -ENODEV;
dist_base = of_iomap(node, 0);
WARN(!dist_base, "unable to map gic dist registers\n");
cpu_base = of_iomap(node, 1);
WARN(!cpu_base, "unable to map gic cpu registers\n");
domain->of_node = of_node_get(node);
gic_init(gic_cnt, -1, dist_base, cpu_base);
if (parent) {
irq = irq_of_parse_and_map(node, 0);
gic_cascade_irq(gic_cnt, irq);
}
gic_cnt++;
return 0;
}
#endif
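For context, a minimal sketch of how platform code might hook gic_of_init() up to its device tree; the board function, the match table and the use of of_irq_init() (a helper merged around the same time) are assumptions for illustration, not part of this diff.

	#include <linux/of_irq.h>
	#include <asm/hardware/gic.h>	/* assumed location of the gic_of_init() prototype */

	/* hypothetical board code: let gic_of_init() probe any matching GIC node */
	static const struct of_device_id board_irq_match[] __initconst = {
		{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
		{ /* sentinel */ }
	};

	static void __init board_init_irq(void)
	{
		of_irq_init(board_irq_match);	/* walks interrupt-controller nodes, parents first */
	}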
......@@ -8,6 +8,7 @@
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
#define CPUID_MPIDR 5
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
......@@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
return read_cpuid(CPUID_TCM);
}
static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
{
return read_cpuid(CPUID_MPIDR);
}
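As a rough sketch of how the MPIDR value read above feeds the ARM_CPU_TOPOLOGY code introduced by this merge, the affinity fields can be decoded as below; the helper name and the printout are made up for illustration, only the ARMv7 field layout is assumed.

	#include <linux/printk.h>

	/* hypothetical helper: split an ARMv7 MPIDR value into its affinity fields */
	static inline void decode_mpidr(unsigned int mpidr)
	{
		unsigned int aff0 = mpidr & 0xff;		/* CPU number within a cluster */
		unsigned int aff1 = (mpidr >> 8) & 0xff;	/* cluster ID */
		unsigned int aff2 = (mpidr >> 16) & 0xff;	/* higher-level affinity */
		unsigned int mt   = (mpidr >> 24) & 0x1;	/* multithreading bit */

		pr_info("cpu %u, cluster %u, aff2 %u, mt %u\n", aff0, aff1, aff2, mt);
	}

	/* usage: decode_mpidr(read_cpuid_mpidr()); */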
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
......
......@@ -205,6 +205,13 @@ extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t);
/*
* This can be called during boot to increase the size of the consistent
* DMA region above its default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
*/
extern void __init init_consistent_dma_size(unsigned long size);
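A minimal sketch of the intended call site, assuming a hypothetical machine's early init hook; the function name and the 4MB figure are illustrative only.

	#include <linux/init.h>
	#include <asm/dma-mapping.h>

	/* hypothetical early board hook: must run before any core_initcall */
	static void __init example_board_init_early(void)
	{
		init_consistent_dma_size(4 << 20);	/* raise the default 2MB region to 4MB */
	}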
#ifdef CONFIG_DMABOUNCE
/*
......
......@@ -25,13 +25,6 @@
movne r1, sp
adrne lr, BSYM(1b)
bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS
test_for_ltirq r0, r2, r6, lr
movne r0, sp
adrne lr, BSYM(1b)
bne do_local_timer
#endif
#endif
9997:
.endm
......
/*
* Annotations for marking C functions as exception handlers.
*
* These should only be used for C functions that are called from the low
* level exception entry code and not any intervening C code.
*/
#ifndef __ASM_ARM_EXCEPTION_H
#define __ASM_ARM_EXCEPTION_H
#include <linux/ftrace.h>
#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry __exception
#endif
#endif /* __ASM_ARM_EXCEPTION_H */
......@@ -9,9 +9,6 @@
typedef struct {
unsigned int __softirq_pending;
#ifdef CONFIG_LOCAL_TIMERS
unsigned int local_timer_irqs;
#endif
#ifdef CONFIG_SMP
unsigned int ipi_irqs[NR_IPI];
#endif
......
......@@ -22,15 +22,11 @@
* interrupt controller spec. To wit:
*
* Interrupts 0-15 are IPI
* 16-28 are reserved
* 29-31 are local. We allow 30 to be used for the watchdog.
* 16-31 are local. We allow 30 to be used for the watchdog.
* 32-1020 are global
* 1021-1022 are reserved
* 1023 is "spurious" (no interrupt)
*
* For now, we ignore all local interrupts so only return an interrupt if it's
* between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
*
* A simple read from the controller will tell us the number of the highest
* priority enabled interrupt. We then just need to check whether it is in the
* valid range for an IRQ (30-1020 inclusive).
......@@ -43,7 +39,7 @@
ldr \tmp, =1021
bic \irqnr, \irqstat, #0x1c00
cmp \irqnr, #29
cmp \irqnr, #15
cmpcc \irqnr, \irqnr
cmpne \irqnr, \tmp
cmpcs \irqnr, \irqnr
......@@ -62,14 +58,3 @@
strcc \irqstat, [\base, #GIC_CPU_EOI]
cmpcs \irqnr, \irqnr
.endm
/* As above, this assumes that irqstat and base are preserved.. */
.macro test_for_ltirq, irqnr, irqstat, base, tmp
bic \irqnr, \irqstat, #0x1c00
mov \tmp, #0
cmp \irqnr, #29
moveq \tmp, #1
streq \irqstat, [\base, #GIC_CPU_EOI]
cmp \tmp, #0
.endm