Commit c87d5d59 authored by Linus Torvalds

Merge Qualcomm Hexagon architecture

This is the fifth version of the patchset (with one tiny whitespace fix)
to the Linux kernel to support the Qualcomm Hexagon architecture.

Between now and the next pull request, Richard Kuo should have his key
signed, etc., and should be back on kernel.org.  In the meantime, this
got merged as an emailed patch series.

* Hexagon: (36 commits)
  Add extra arch overrides to asm-generic/checksum.h
  Hexagon: Add self to MAINTAINERS
  Hexagon: Add basic stacktrace functionality for Hexagon architecture.
  Hexagon: Add configuration and makefiles for the Hexagon architecture.
  Hexagon: Comet platform support
  Hexagon: kgdb support files
  Hexagon: Add page-fault support.
  Hexagon: Add page table header files & etc.
  Hexagon: Add ioremap support
  Hexagon: Provide DMA implementation
  Hexagon: Implement basic TLB management routines for Hexagon.
  Hexagon: Implement basic cache-flush support
  Hexagon: Provide basic implementation and/or stubs for I/O routines.
  Hexagon: Add user access functions
  Hexagon: Add locking types and functions
  Hexagon: Add SMP support
  Hexagon: Provide basic debugging and system trap support.
  Hexagon: Add ptrace support
  Hexagon: Add time and timer functions
  Hexagon: Add interrupts
  ...
parents 094803e0 4e29198e
@@ -5359,6 +5359,12 @@ F: fs/qnx4/
F: include/linux/qnx4_fs.h
F: include/linux/qnxtypes.h
QUALCOMM HEXAGON ARCHITECTURE
M: Richard Kuo <rkuo@codeaurora.org>
L: linux-hexagon@vger.kernel.org
S: Supported
F: arch/hexagon/
RADOS BLOCK DEVICE (RBD)
F: include/linux/qnxtypes.h
M: Yehuda Sadeh <yehuda@hq.newdream.net>
......
# Hexagon configuration
comment "Linux Kernel Configuration for Hexagon"
config HEXAGON
def_bool y
select HAVE_OPROFILE
select USE_GENERIC_SMP_HELPERS if SMP
# Other pending projects/to-do items.
# select HAVE_REGS_AND_STACK_ACCESS_API
# select HAVE_HW_BREAKPOINT if PERF_EVENTS
# select ARCH_HAS_CPU_IDLE_WAIT
# select ARCH_WANT_OPTIONAL_GPIOLIB
# select ARCH_REQUIRE_GPIOLIB
# select HAVE_CLK
# select IRQ_PER_CPU
select HAVE_IRQ_WORK
# select GENERIC_PENDING_IRQ if SMP
select GENERIC_ATOMIC64
select HAVE_PERF_EVENTS
select HAVE_GENERIC_HARDIRQS
select GENERIC_HARDIRQS_NO__DO_IRQ
select GENERIC_HARDIRQS_NO_DEPRECATED
# GENERIC_ALLOCATOR is used by dma_alloc_coherent()
select GENERIC_ALLOCATOR
select GENERIC_IRQ_SHOW
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select NO_IOPORT
# mostly generic routines, with some accelerated ones
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
config HEXAGON_ARCH_V1
bool
config HEXAGON_ARCH_V2
bool
config HEXAGON_ARCH_V3
bool
config HEXAGON_ARCH_V4
bool
config FRAME_POINTER
def_bool y
config LOCKDEP_SUPPORT
def_bool y
config PCI
def_bool n
config EARLY_PRINTK
def_bool y
config KTIME_SCALAR
def_bool y
config MMU
def_bool y
config TRACE_IRQFLAGS_SUPPORT
def_bool y
config GENERIC_CSUM
def_bool y
#
# Use the generic interrupt handling code in kernel/irq/:
#
config GENERIC_IRQ_PROBE
def_bool y
config GENERIC_IOMAP
def_bool y
#config ZONE_DMA
# bool
# default y
config HAS_DMA
bool
select HAVE_DMA_ATTRS
default y
config NEED_SG_DMA_LENGTH
def_bool y
config RWSEM_GENERIC_SPINLOCK
def_bool n
config RWSEM_XCHGADD_ALGORITHM
def_bool y
config GENERIC_FIND_NEXT_BIT
def_bool y
config GENERIC_HWEIGHT
def_bool y
config GENERIC_TIME
def_bool y
config GENERIC_CLOCKEVENTS
def_bool y
config GENERIC_CLOCKEVENTS_BROADCAST
def_bool y
config STACKTRACE_SUPPORT
def_bool y
select STACKTRACE
config GENERIC_BUG
def_bool y
depends on BUG
config BUG
def_bool y
menu "Machine selection"
choice
prompt "System type"
default HEXAGON_ARCH_V2
config HEXAGON_COMET
bool "Comet Board"
select HEXAGON_ARCH_V2
---help---
Support for the Comet platform.
endchoice
config HEXAGON_VM
def_bool y
config CMDLINE
string "Default kernel command string"
default ""
help
On some platforms, there is currently no way for the boot loader
to pass arguments to the kernel. For these, you should supply some
command-line options at build time by entering them here. At a
minimum, you should specify the memory size and the root device
(e.g., mem=64M root=/dev/nfs).
config HEXAGON_ANGEL_TRAPS
bool "Use Angel Traps"
default n
---help---
Enable angel debug traps (for printk's).
config SMP
bool "Multi-Processing support"
---help---
Enables SMP support in the kernel. If unsure, say "Y".
config NR_CPUS
int "Maximum number of CPUs" if SMP
range 2 6 if SMP
default "1" if !SMP
default "6" if SMP
---help---
This allows you to specify the maximum number of CPUs which this
kernel will support. The maximum supported value is 6 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image.
choice
prompt "Kernel page size"
default PAGE_SIZE_4KB
---help---
Changes the default page size; use with caution.
config PAGE_SIZE_4KB
bool "4KB"
config PAGE_SIZE_16KB
bool "16KB"
config PAGE_SIZE_64KB
bool "64KB"
config PAGE_SIZE_256KB
bool "256KB"
endchoice
source "mm/Kconfig"
source "kernel/Kconfig.hz"
source "kernel/time/Kconfig"
config GENERIC_GPIO
bool "Generic GPIO support"
default n
endmenu
source "init/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
menu "Executable File Formats"
source "fs/Kconfig.binfmt"
endmenu
source "net/Kconfig"
source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
menu "Kernel hacking"
source "lib/Kconfig.debug"
endmenu
# Makefile for the Hexagon arch
KBUILD_DEFCONFIG = comet_defconfig
# Do not use GP-relative jumps
KBUILD_CFLAGS += -G0
LDFLAGS_vmlinux += -G0
# Do not use single-byte enums; these will overflow.
KBUILD_CFLAGS += -fno-short-enums
# Modules must use either long-calls, or use pic/plt.
# Use long-calls for now; it's easier, and faster.
# CFLAGS_MODULE += -fPIC
# LDFLAGS_MODULE += -shared
CFLAGS_MODULE += -mlong-calls
cflags-$(CONFIG_HEXAGON_ARCH_V1) += $(call cc-option,-mv1)
cflags-$(CONFIG_HEXAGON_ARCH_V2) += $(call cc-option,-mv2)
cflags-$(CONFIG_HEXAGON_ARCH_V3) += $(call cc-option,-mv3)
cflags-$(CONFIG_HEXAGON_ARCH_V4) += $(call cc-option,-mv4)
aflags-$(CONFIG_HEXAGON_ARCH_V1) += $(call cc-option,-mv1)
aflags-$(CONFIG_HEXAGON_ARCH_V2) += $(call cc-option,-mv2)
aflags-$(CONFIG_HEXAGON_ARCH_V3) += $(call cc-option,-mv3)
aflags-$(CONFIG_HEXAGON_ARCH_V4) += $(call cc-option,-mv4)
ldflags-$(CONFIG_HEXAGON_ARCH_V1) += $(call cc-option,-mv1)
ldflags-$(CONFIG_HEXAGON_ARCH_V2) += $(call cc-option,-mv2)
ldflags-$(CONFIG_HEXAGON_ARCH_V3) += $(call cc-option,-mv3)
ldflags-$(CONFIG_HEXAGON_ARCH_V4) += $(call cc-option,-mv4)
KBUILD_CFLAGS += $(cflags-y)
KBUILD_AFLAGS += $(aflags-y)
# no KBUILD_LDFLAGS?
LDFLAGS += $(ldflags-y)
# Thread-info register will be r19. This value is not configurable;
# it is hard-coded in several files.
TIR_NAME := r19
KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
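# Illustrative note, not part of this patch: reserving r19 via -ffixed-r19
# keeps the compiler from ever allocating it, so C code can bind the
# current thread_info pointer to it with a GCC register variable along
# the lines of (hypothetical sketch):
#	register struct thread_info *__current_thread_info asm("r19");
# The actual accessor presumably lives in the thread_info.h added
# elsewhere in this series.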
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
libs-y += $(LIBGCC)
head-y := arch/hexagon/kernel/head.o \
arch/hexagon/kernel/init_task.o
core-y += arch/hexagon/kernel/ \
arch/hexagon/mm/ \
arch/hexagon/lib/
# arch/hexagon/platform/common/
#
#core-$(CONFIG_HEXAGON_COMET) += arch/hexagon/platform/comet/
#machine-$(CONFIG_HEXAGON_COMET) := comet
CONFIG_SMP=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=0
CONFIG_HZ_100=y
CONFIG_EXPERIMENTAL=y
CONFIG_CROSS_COMPILE="hexagon-"
CONFIG_LOCALVERSION="-smp"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_NETDEVICES=y
CONFIG_MII=y
CONFIG_PHYLIB=y
CONFIG_NET_ETHERNET=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_CONSOLE_TRANSLATIONS is not set
CONFIG_LEGACY_PTY_COUNT=64
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
CONFIG_SPI_DEBUG=y
CONFIG_SPI_BITBANG=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC16=y
CONFIG_CRC_T10DIF=y
CONFIG_LIBCRC32C=y
CONFIG_FRAME_WARN=0
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_INFO=y
include include/asm-generic/Kbuild.asm
header-y += registers.h
header-y += ucontext.h
header-y += user.h
generic-y += auxvec.h
generic-y += bug.h
generic-y += bugs.h
generic-y += cpumask.h
generic-y += cputime.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += iomap.h
generic-y += ipcbuf.h
generic-y += ipc.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local64.h
generic-y += local.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += pci.h
generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
generic-y += rwsem.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += shmbuf.h
generic-y += shmparam.h
generic-y += siginfo.h
generic-y += socket.h
generic-y += sockios.h
generic-y += statfs.h
generic-y += stat.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
generic-y += types.h
generic-y += ucontext.h
generic-y += unaligned.h
generic-y += xor.h
#include <generated/asm-offsets.h>
/*
* Atomic operations for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
#include <linux/types.h>
#define ATOMIC_INIT(i) { (i) }
#define atomic_set(v, i) ((v)->counter = (i))
/**
* atomic_read - reads a word, atomically
* @v: pointer to atomic value
*
* Assumes all word reads on our architecture are atomic.
*/
#define atomic_read(v) ((v)->counter)
/**
 * atomic_xchg - atomically exchange a value, returning the old one
* @v: pointer to memory to change
* @new: new value (technically passed in a register -- see xchg)
*/
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_cmpxchg - atomic compare-and-exchange values
* @v: pointer to value to change
* @old: desired old value to match
* @new: new value to put in
*
* Parameters are then pointer, value-in-register, value-in-register,
* and the output is the old value.
*
* Apparently this is complicated for archs that don't support
* the memw_locked like we do (or it's broken or whatever).
*
* Kind of the lynchpin of the rest of the generically defined routines.
* Remember V2 had that bug with dotnew predicate set by memw_locked.
*
* "old" is "expected" old val, __oldval is actual old value
*/
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int __oldval;
asm volatile(
"1: %0 = memw_locked(%1);\n"
" { P0 = cmp.eq(%0,%2);\n"
" if (!P0.new) jump:nt 2f; }\n"
" memw_locked(%1,P0) = %3;\n"
" if (!P0) jump 1b;\n"
"2:\n"
: "=&r" (__oldval)
: "r" (&v->counter), "r" (old), "r" (new)
: "memory", "p0"
);
return __oldval;
}
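/*
 * Illustrative sketch, not part of this patch: atomic_cmpxchg() is the
 * primitive that the rest of the generically-defined routines can be
 * built on, via the usual compare-and-retry loop.  example_atomic_or()
 * below is a hypothetical helper shown only to make the pattern
 * concrete.
 */
static inline void example_atomic_or(int mask, atomic_t *v)
{
	int old;

	do {
		old = atomic_read(v);
	} while (atomic_cmpxchg(v, old, old | mask) != old);
}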
static inline int atomic_add_return(int i, atomic_t *v)
{
int output;
__asm__ __volatile__ (
"1: %0 = memw_locked(%1);\n"
" %0 = add(%0,%2);\n"
" memw_locked(%1,P3)=%0;\n"
" if !P3 jump 1b;\n"
: "=&r" (output)
: "r" (&v->counter), "r" (i)
: "memory", "p3"
);
return output;
}
#define atomic_add(i, v) atomic_add_return(i, (v))
static inline int atomic_sub_return(int i, atomic_t *v)
{
int output;
__asm__ __volatile__ (
"1: %0 = memw_locked(%1);\n"
" %0 = sub(%0,%2);\n"
" memw_locked(%1,P3)=%0\n"
" if !P3 jump 1b;\n"
: "=&r" (output)
: "r" (&v->counter), "r" (i)
: "memory", "p3"
);
return output;
}
#define atomic_sub(i, v) atomic_sub_return(i, (v))
/**
 * __atomic_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
*
* Returns 1 if the add happened, 0 if it didn't.
*/
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int output, __oldval;
asm volatile(
"1: %0 = memw_locked(%2);"
" {"
" p3 = cmp.eq(%0, %4);"
" if (p3.new) jump:nt 2f;"
" %0 = add(%0, %3);"
" %1 = #0;"
" }"
" memw_locked(%2, p3) = %0;"
" {"
" if !p3 jump 1b;"
" %1 = #1;"
" }"
"2:"
: "=&r" (__oldval), "=&r" (output)
: "r" (v), "r" (a), "r" (u)
: "memory", "p3"
);
return output;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
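/*
 * Illustrative sketch, not part of this patch: the classic consumer of
 * atomic_inc_not_zero() is a lookup path that only takes a reference
 * while the object is still live, e.g.
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		obj = NULL;		(object already being torn down)
 */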
#endif
/*
* Bit operations for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/atomic.h>
#ifdef __KERNEL__
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/*
* The offset calculations for these are based on BITS_PER_LONG == 32
* (i.e. I get to shift by #5-2 (32 bits per long, 4 bytes per access),
* mask by 0x0000001F)
*
* Typically, R10 is clobbered for address, R11 bit nr, and R12 is temp
*/
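/*
 * Illustrative sketch, not part of this patch: the C equivalent of the
 * address/bit split done in the assembly below, for nr = 37 with
 * 32-bit longs:
 *
 *	word = (volatile unsigned int *)addr + (nr >> 5);   37 >> 5   == 1
 *	bit  = nr & 0x1f;                                   37 & 0x1f == 5
 *
 * i.e. add (nr >> 5) << 2 bytes to the base address, then operate on
 * bit (nr & 31) of that 32-bit word.
 */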
/**
* test_and_clear_bit - clear a bit and return its old value
* @nr: bit number to clear
* @addr: pointer to memory
*/
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
int oldval;
__asm__ __volatile__ (
" {R10 = %1; R11 = asr(%2,#5); }\n"
" {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
" {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
);
return oldval;
}
/**
* test_and_set_bit - set a bit and return its old value
* @nr: bit number to set
* @addr: pointer to memory
*/
static inline int test_and_set_bit(int nr, volatile void *addr)
{
int oldval;
__asm__ __volatile__ (
" {R10 = %1; R11 = asr(%2,#5); }\n"
" {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
" {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
);
return oldval;
}
/**
* test_and_change_bit - toggle a bit and return its old value
 * @nr: bit number to toggle
* @addr: pointer to memory
*/
static inline int test_and_change_bit(int nr, volatile void *addr)
{
int oldval;
__asm__ __volatile__ (
" {R10 = %1; R11 = asr(%2,#5); }\n"
" {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
" {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
);
return oldval;
}
/*
* Atomic, but doesn't care about the return value.
* Rewrite later to save a cycle or two.
*/
static inline void clear_bit(int nr, volatile void *addr)
{
test_and_clear_bit(nr, addr);
}
static inline void set_bit(int nr, volatile void *addr)
{
test_and_set_bit(nr, addr);
}
static inline void change_bit(int nr, volatile void *addr)
{
test_and_change_bit(nr, addr);
}
/*
* These are allowed to be non-atomic. In fact the generic flavors are
* in non-atomic.h. Would it be better to use intrinsics for this?
*
* OK, writes in our architecture do not invalidate LL/SC, so this has to
* be atomic, particularly for things like slab_lock and slab_unlock.
*
*/
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
test_and_clear_bit(nr, addr);
}
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
test_and_set_bit(nr, addr);
}
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
test_and_change_bit(nr, addr);
}
/* Apparently, at least some of these are allowed to be non-atomic */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return test_and_clear_bit(nr, addr);
}
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
return test_and_change_bit(nr, addr);
}
static inline int __test_bit(int nr, const volatile unsigned long *addr)
{
int retval;
asm volatile(
"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
: "=&r" (retval)
: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
: "p0"
);
return retval;
}
#define test_bit(nr, addr) __test_bit(nr, addr)
/*
* ffz - find first zero in word.
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
static inline long ffz(int x)
{
int r;
asm("%0 = ct1(%1);\n"
: "=&r" (r)
: "r" (x));
return r;
}
/*
* fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline long fls(int x)
{
int r;
asm("{ %0 = cl0(%1);}\n"
"%0 = sub(#32,%0);\n"
: "=&r" (r)
: "r" (x)
: "p0");
return r;
}
/*
* ffs - find first bit set
* @x: the word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static inline long ffs(int x)
{
int r;
asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
"{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
: "=&r" (r)
: "r" (x)
: "p0");
return r;
}
/*
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*
* bits_per_long assumed to be 32
 * numbering starts at 0 (instead of 1 like ffs)
*/
static inline unsigned long __ffs(unsigned long word)
{
int num;
asm("%0 = ct0(%1);\n"
: "=&r" (num)
: "r" (word));
return num;
}
/*
* __fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
* bits_per_long assumed to be 32
*/
static inline unsigned long __fls(unsigned long word)
{
int num;
asm("%0 = cl0(%1);\n"
"%0 = sub(#31,%0);\n"
: "=&r" (num)
: "r" (word));
return num;
}
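/*
 * Illustrative examples, not part of this patch, of the conventions
 * above: ffs()/fls() are 1-based with ffs(0) == fls(0) == 0, while
 * __ffs()/__fls()/ffz() are 0-based and undefined when no suitable bit
 * exists.
 *
 *	ffs(0x00000008)  == 4	__ffs(0x00000008)  == 3
 *	fls(0x80000000)  == 32	__fls(0x80000000)  == 31
 *	ffz(0x0000ffff)  == 16	ffz(0xfffffffe)    == 0
 */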
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __KERNEL__ */
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __ASM_HEXAGON_BITSPERLONG_H
#define __ASM_HEXAGON_BITSPERLONG_H
#define __BITS_PER_LONG 32
#include <asm-generic/bitsperlong.h>
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_BYTEORDER_H
#define _ASM_BYTEORDER_H
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
#endif
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_BYTEORDER_H */
/*
* Cache definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __ASM_CACHE_H
#define __ASM_CACHE_H
/* Bytes per L1 cache line */
#define L1_CACHE_SHIFT (5)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __cacheline_aligned __aligned(L1_CACHE_BYTES)
#define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
/* See http://kerneltrap.org/node/15100 */
#define __read_mostly
#endif
/*
* Cache flush operations for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H
#include <linux/cache.h>
#include <linux/mm.h>
#include <asm/string.h>
#include <asm-generic/cacheflush.h>
/* Cache flushing:
*
* - flush_cache_all() flushes entire cache
* - flush_cache_mm(mm) flushes the specified mm context's cache lines
* - flush_cache_page(mm, vmaddr, pfn) flushes a single page
* - flush_cache_range(vma, start, end) flushes a range of pages
* - flush_icache_range(start, end) flush a range of instructions
* - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
* - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
*
* Need to doublecheck which one is really needed for ptrace stuff to work.
*/
#define LINESIZE 32
#define LINEBITS 5
/*
* Flush Dcache range through current map.
*/
extern void flush_dcache_range(unsigned long start, unsigned long end);
/*
* Flush Icache range through current map.
*/
#undef flush_icache_range
extern void flush_icache_range(unsigned long start, unsigned long end);
/*
* Memory-management related flushes are there to ensure in non-physically
* indexed cache schemes that stale lines belonging to a given ASID aren't
* in the cache to confuse things. The prototype Hexagon Virtual Machine
* only uses a single ASID for all user-mode maps, which should
* mean that they aren't necessary. A brute-force, flush-everything
* implementation, with the name xxxxx_hexagon() is present in
* arch/hexagon/mm/cache.c, but let's not wire it up until we know
* it is needed.
*/
extern void flush_cache_all_hexagon(void);
/*
* This may or may not ever have to be non-null, depending on the
 * virtual machine MMU. For a native kernel, it's definitely a no-op.
*
* This is also the place where deferred cache coherency stuff seems
* to happen, classically... but instead we do it like ia64 and
* clean the cache when the PTE is set.
*
*/
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/* generic_ptrace_pokedata doesn't wind up here, does it? */
}
#undef copy_to_user_page
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page,
unsigned long vaddr,
void *dst, void *src, int len)
{
memcpy(dst, src, len);
if (vma->vm_flags & VM_EXEC) {
flush_icache_range((unsigned long) dst,
(unsigned long) dst + len);
}
}
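/*
 * Illustrative note, not part of this patch: the icache flush in the
 * VM_EXEC case above is what makes ptrace-style writes into executable
 * mappings (inserting a breakpoint, for instance) visible to the
 * instruction cache before the target task runs the modified code.
 */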
extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H
#define do_csum do_csum
unsigned int do_csum(const void *voidptr, int len);
/*
* the same as csum_partial, but copies from src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
#define csum_partial_copy_nocheck csum_partial_copy_nocheck
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
#define csum_tcpudp_nofold csum_tcpudp_nofold
__wsum csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto, __wsum sum);
#define csum_tcpudp_magic csum_tcpudp_magic
__sum16 csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto, __wsum sum);
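/*
 * Illustrative sketch, not part of this patch: a typical caller folds
 * the pseudo-header into a checksum of the payload, e.g. for UDP:
 *
 *	__wsum csum = csum_partial(uh, ulen, 0);
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen,
 *				      IPPROTO_UDP, csum);
 */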
#include <asm-generic/checksum.h>
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_DELAY_H
#define _ASM_DELAY_H
#include <asm/param.h>
extern void __udelay(unsigned long usecs);
#define udelay(usecs) __udelay((usecs))
#endif /* _ASM_DELAY_H */
/*
* DMA operations for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
struct device;
extern int bad_dma_address;
extern struct dma_map_ops *dma_ops;
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (unlikely(dev == NULL))
return NULL;
return dma_ops;
}
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#include <asm-generic/dma-mapping-common.h>
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
return 0;
return addr + size - 1 <= *dev->dma_mask;
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (dma_ops->mapping_error)
return dma_ops->mapping_error(dev, dma_addr);
return (dma_addr == bad_dma_address);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
ret = ops->alloc_coherent(dev, size, dma_handle, flag);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
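/*
 * Illustrative sketch, not part of this patch: drivers pair these in
 * the usual way, e.g.
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, dma);
 */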
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <asm/io.h>
#define MAX_DMA_CHANNELS 1
#define MAX_DMA_ADDRESS (PAGE_OFFSET)
extern size_t hexagon_coherent_pool_size;
#endif
/*
* ELF definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __ASM_ELF_H
#define __ASM_ELF_H
#include <asm/ptrace.h>
#include <asm/user.h>
/*
* This should really be in linux/elf-em.h.
*/
#define EM_HEXAGON 164 /* QUALCOMM Hexagon */
struct elf32_hdr;
/*
* ELF header e_flags defines.
*/
/* should have stuff like "CPU type" and maybe "ABI version", etc */
/* Hexagon relocations */
/* V2 */
#define R_HEXAGON_NONE 0
#define R_HEXAGON_B22_PCREL 1
#define R_HEXAGON_B15_PCREL 2
#define R_HEXAGON_B7_PCREL 3
#define R_HEXAGON_LO16 4
#define R_HEXAGON_HI16 5
#define R_HEXAGON_32 6
#define R_HEXAGON_16 7
#define R_HEXAGON_8 8
#define R_HEXAGON_GPREL16_0 9
#define R_HEXAGON_GPREL16_1 10
#define R_HEXAGON_GPREL16_2 11
#define R_HEXAGON_GPREL16_3 12
#define R_HEXAGON_HL16 13
/* V3 */
#define R_HEXAGON_B13_PCREL 14
/* V4 */
#define R_HEXAGON_B9_PCREL 15
/* V4 (extenders) */
#define R_HEXAGON_B32_PCREL_X 16
#define R_HEXAGON_32_6_X 17
/* V4 (extended) */
#define R_HEXAGON_B22_PCREL_X 18
#define R_HEXAGON_B15_PCREL_X 19
#define R_HEXAGON_B13_PCREL_X 20
#define R_HEXAGON_B9_PCREL_X 21
#define R_HEXAGON_B7_PCREL_X 22
#define R_HEXAGON_16_X 23
#define R_HEXAGON_12_X 24
#define R_HEXAGON_11_X 25
#define R_HEXAGON_10_X 26
#define R_HEXAGON_9_X 27
#define R_HEXAGON_8_X 28
#define R_HEXAGON_7_X 29
#define R_HEXAGON_6_X 30
/* V2 PIC */
#define R_HEXAGON_32_PCREL 31
#define R_HEXAGON_COPY 32
#define R_HEXAGON_GLOB_DAT 33
#define R_HEXAGON_JMP_SLOT 34
#define R_HEXAGON_RELATIVE 35
#define R_HEXAGON_PLT_B22_PCREL 36
#define R_HEXAGON_GOTOFF_LO16 37
#define R_HEXAGON_GOTOFF_HI16 38
#define R_HEXAGON_GOTOFF_32 39
#define R_HEXAGON_GOT_LO16 40
#define R_HEXAGON_GOT_HI16 41
#define R_HEXAGON_GOT_32 42
#define R_HEXAGON_GOT_16 43
/*
* ELF register definitions..
*/
typedef unsigned long elf_greg_t;
typedef struct user_regs_struct elf_gregset_t;
#define ELF_NGREG (sizeof(elf_gregset_t)/sizeof(unsigned long))
/* Placeholder */
typedef unsigned long elf_fpregset_t;
/*
* Bypass the whole "regsets" thing for now and use the define.
*/
#define ELF_CORE_COPY_REGS(DEST, REGS) \
do { \
DEST.r0 = REGS->r00; \
DEST.r1 = REGS->r01; \
DEST.r2 = REGS->r02; \
DEST.r3 = REGS->r03; \
DEST.r4 = REGS->r04; \
DEST.r5 = REGS->r05; \
DEST.r6 = REGS->r06; \
DEST.r7 = REGS->r07; \
DEST.r8 = REGS->r08; \
DEST.r9 = REGS->r09; \
DEST.r10 = REGS->r10; \
DEST.r11 = REGS->r11; \
DEST.r12 = REGS->r12; \
DEST.r13 = REGS->r13; \
DEST.r14 = REGS->r14; \
DEST.r15 = REGS->r15; \
DEST.r16 = REGS->r16; \
DEST.r17 = REGS->r17; \
DEST.r18 = REGS->r18; \
DEST.r19 = REGS->r19; \
DEST.r20 = REGS->r20; \
DEST.r21 = REGS->r21; \
DEST.r22 = REGS->r22; \
DEST.r23 = REGS->r23; \
DEST.r24 = REGS->r24; \
DEST.r25 = REGS->r25; \
DEST.r26 = REGS->r26; \
DEST.r27 = REGS->r27; \
DEST.r28 = REGS->r28; \
DEST.r29 = pt_psp(REGS); \
DEST.r30 = REGS->r30; \
DEST.r31 = REGS->r31; \
DEST.sa0 = REGS->sa0; \
DEST.lc0 = REGS->lc0; \
DEST.sa1 = REGS->sa1; \
DEST.lc1 = REGS->lc1; \
DEST.m0 = REGS->m0; \
DEST.m1 = REGS->m1; \
DEST.usr = REGS->usr; \
DEST.p3_0 = REGS->preds; \
DEST.gp = REGS->gp; \
DEST.ugp = REGS->ugp; \
DEST.pc = pt_elr(REGS); \
DEST.cause = pt_cause(REGS); \
DEST.badva = pt_badva(REGS); \
} while (0);
/*
* This is used to ensure we don't load something for the wrong architecture.
* Checks the machine and ABI type.
*/
#define elf_check_arch(hdr) ((hdr)->e_machine == EM_HEXAGON)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_HEXAGON
#ifdef CONFIG_HEXAGON_ARCH_V2
#define ELF_CORE_EFLAGS 0x1
#endif
#ifdef CONFIG_HEXAGON_ARCH_V3
#define ELF_CORE_EFLAGS 0x2
#endif
#ifdef CONFIG_HEXAGON_ARCH_V4
#define ELF_CORE_EFLAGS 0x3
#endif
/*
* Some architectures have ld.so set up a pointer to a function
* to be registered using atexit, to facilitate cleanup. So that
* static executables will be well-behaved, we would null the register
* in question here, in the pt_regs structure passed. For now,
* leave it a null macro.
*/
#define ELF_PLAT_INIT(regs, load_addr) do { } while (0)
#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Hrm is this going to cause problems for changing PAGE_SIZE? */
#define ELF_EXEC_PAGESIZE 4096
/*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE 0x08000000UL
/*
* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports.
*/
#define ELF_HWCAP (0)
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*/
#define ELF_PLATFORM (NULL)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
#endif
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#endif
/*
* Fixmap support for Hexagon - enough to support highmem features
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
/*
* A lot of the fixmap info is already in mem-layout.h
*/
#include <asm/mem-layout.h>
/*
* Full fixmap support involves set_fixmap() functions, but
* these may not be needed if all we're after is an area for
* highmem kernel mappings.
*/
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
/**
* fix_to_virt -- "index to address" translation.
*
* If anyone tries to use the idx directly without translation,
 * we catch the bug with a NULL-dereference kernel oops. Illegal
* ranges of incoming indices are caught too.
*/
static inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* This branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way. (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
 * loudly with a reasonably clear error message.
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
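/*
 * Worked example, illustrative only: with 4KB pages, fix_to_virt(1)
 * returns FIXADDR_TOP - (1 << PAGE_SHIFT), i.e. the page that starts
 * one page below the top of the fixmap region, and virt_to_fix() of
 * any address inside that page returns index 1.
 */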
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
(vaddr)), (vaddr)), (vaddr))
#endif
/*
* If the FPU is used inside the kernel,
* kernel_fpu_end() will be defined here.
*/
#ifndef _ASM_HEXAGON_FUTEX_H
#define _ASM_HEXAGON_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
/* XXX TODO-- need to add sync barriers! */
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile( \
"1: %0 = memw_locked(%3);\n" \
/* For example: %1 = %4 */ \
insn \
"2: memw_locked(%3,p2) = %1;\n" \
" if !p2 jump 1b;\n" \
" %1 = #0;\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: %1 = #%5;\n" \
" jump 3b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
".long 1b,4b,2b,4b\n" \
".previous\n" \
: "=&r" (oldval), "=&r" (ret), "+m" (*uaddr) \
: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
: "p2", "memory")
static inline int
futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("%1 = %4\n", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("%1 = add(%0,%4)\n", ret, oldval, uaddr,
oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("%1 = or(%0,%4)\n", ret, oldval, uaddr,
oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("%1 = not(%4); %1 = and(%0,%1)\n", ret,
oldval, uaddr, oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("%1 = xor(%0,%4)\n", ret, oldval, uaddr,
oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ:
ret = (oldval == cmparg);
break;
case FUTEX_OP_CMP_NE:
ret = (oldval != cmparg);
break;
case FUTEX_OP_CMP_LT:
ret = (oldval < cmparg);
break;
case FUTEX_OP_CMP_GE:
ret = (oldval >= cmparg);
break;
case FUTEX_OP_CMP_LE:
ret = (oldval <= cmparg);
break;
case FUTEX_OP_CMP_GT:
ret = (oldval > cmparg);
break;
default:
ret = -ENOSYS;
}
}
return ret;
}
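/*
 * Worked example, illustrative only: for an encoded_op built as
 * FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0), the decode above
 * yields op = FUTEX_OP_ADD, oparg = 1, cmp = FUTEX_OP_CMP_EQ and
 * cmparg = 0, i.e. "atomically add 1 to *uaddr and report whether the
 * old value was zero".
 */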
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
u32 newval)
{
int prev;
int ret;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
__asm__ __volatile__ (
"1: %1 = memw_locked(%3)\n"
" {\n"
" p2 = cmp.eq(%1,%4)\n"
" if !p2.new jump:NT 3f\n"
" }\n"
"2: memw_locked(%3,p2) = %5\n"
" if !p2 jump 1b\n"
"3:\n"
".section .fixup,\"ax\"\n"
"4: %0 = #%6\n"
" jump 3b\n"
".previous\n"
".section __ex_table,\"a\"\n"
".long 1b,4b,2b,4b\n"
".previous\n"
: "+r" (ret), "=&r" (prev), "+m" (*uaddr)
: "r" (uaddr), "r" (oldval), "r" (newval), "i"(-EFAULT)
: "p2", "memory");
*uval = prev;
return ret;
}
#endif /* __KERNEL__ */
#endif /* _ASM_HEXAGON_FUTEX_H */