Commit 34471a91 authored by Russell King

Merge branch 'ppi-irq-core-for-rmk' of git://github.com/mzyngier/arm-platforms into devel-stable

parents cefd3e71 28af690a
Showing 376 additions and 105 deletions
......@@ -1420,6 +1420,31 @@ config SMP_ON_UP
If you don't know what to do here, say Y.
config ARM_CPU_TOPOLOGY
bool "Support cpu topology definition"
depends on SMP && CPU_V7
default y
help
Support ARM cpu topology definition. The MPIDR register defines
affinity between processors, which is then used to describe the cpu
topology of an ARM system.
config SCHED_MC
bool "Multi-core scheduler support"
depends on ARM_CPU_TOPOLOGY
help
Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
config SCHED_SMT
bool "SMT scheduler support"
depends on ARM_CPU_TOPOLOGY
help
Improves the CPU scheduler's decision making when dealing with
MultiThreading at a cost of slightly increased overhead in some
places. If unsure say N here.
config HAVE_ARM_SCU
bool
help
......
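
For reference, enabling everything amounts to the following .config fragment (a sketch; SCHED_MC and SCHED_SMT only become visible once ARM_CPU_TOPOLOGY is set, per the depends lines above):

CONFIG_ARM_CPU_TOPOLOGY=y
CONFIG_SCHED_MC=y
CONFIG_SCHED_SMT=y
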
......@@ -29,6 +29,9 @@
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
......@@ -181,7 +184,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return -EINVAL;
mask = 0xff << shift;
bit = 1 << (cpu + shift);
bit = 1 << (cpu_logical_map(cpu) + shift);
spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
......@@ -260,9 +263,16 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
unsigned int irq_start)
{
unsigned int gic_irqs, irq_limit, i;
u32 cpumask;
void __iomem *base = gic->dist_base;
u32 cpumask = 1 << smp_processor_id();
u32 cpu = 0;
u32 nrppis = 0, ppi_base = 0;
#ifdef CONFIG_SMP
cpu = cpu_logical_map(smp_processor_id());
#endif
cpumask = 1 << cpu;
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
......@@ -279,6 +289,23 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
gic->gic_irqs = gic_irqs;
/*
* Nobody would be insane enough to use PPIs on a secondary
* GIC, right?
*/
if (gic == &gic_data[0]) {
nrppis = (32 - irq_start) & 31;
/* The GIC only supports up to 16 PPIs. */
if (nrppis > 16)
BUG();
ppi_base = gic->irq_offset + 32 - nrppis;
}
pr_info("Configuring GIC with %d sources (%d PPIs)\n",
gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);
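/*
 * Worked example (irq_start value assumed): with irq_start == 29,
 * nrppis == (32 - 29) & 31 == 3, and ppi_base covers PPIs 29-31,
 * exactly the local timer and watchdog interrupts the kernel used
 * to special-case.
 */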
/*
* Set all global interrupts to be level triggered, active low.
*/
......@@ -314,7 +341,17 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
/*
* Setup the Linux IRQ subsystem.
*/
for (i = irq_start; i < irq_limit; i++) {
for (i = 0; i < nrppis; i++) {
int ppi = i + ppi_base;
irq_set_percpu_devid(ppi);
irq_set_chip_and_handler(ppi, &gic_chip,
handle_percpu_devid_irq);
irq_set_chip_data(ppi, gic);
set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
}
for (i = irq_start + nrppis; i < irq_limit; i++) {
irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
irq_set_chip_data(i, gic);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
......@@ -557,20 +594,15 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr)
gic_cpu_init(&gic_data[gic_nr]);
}
void __cpuinit gic_enable_ppi(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
irq_set_status_flags(irq, IRQ_NOPROBE);
gic_unmask_irq(irq_get_irq_data(irq));
local_irq_restore(flags);
}
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
unsigned long map = *cpus_addr(*mask);
int cpu;
unsigned long map = 0;
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
map |= 1 << cpu_logical_map(cpu);
/*
* Ensure that stores to Normal memory are visible to the
......
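
The loop above is needed because GIC_DIST_SOFTINT addresses CPU interfaces by physical number, while the kernel's cpumask is logical. A standalone sketch of the same conversion (plain C; map[] stands in for __cpu_logical_map[] and all values are assumed):

#include <stdio.h>

int main(void)
{
	int map[4] = { 2, 1, 0, 3 };		/* assumed: boot CPU is physical 2 */
	unsigned int logical_mask = 0x3;	/* target logical CPUs 0 and 1 */
	unsigned long phys_mask = 0;

	for (int cpu = 0; cpu < 4; cpu++)
		if (logical_mask & (1U << cpu))
			phys_mask |= 1UL << map[cpu];

	printf("SGI target mask: %#lx\n", phys_mask);	/* 0x6: physical CPUs 1 and 2 */
	return 0;
}
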
......@@ -8,6 +8,7 @@
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
#define CPUID_MPIDR 5
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
......@@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
return read_cpuid(CPUID_TCM);
}
static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
{
return read_cpuid(CPUID_MPIDR);
}
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
......
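
read_cpuid() is the usual MRC-based CP15 accessor, so the new helper reads the Multiprocessor Affinity Register directly. A hand-expanded sketch of what it boils down to on ARMv7 (mpidr_raw is a hypothetical name):

static inline unsigned int mpidr_raw(void)
{
	unsigned int val;

	/* MPIDR is CP15 register c0, c0, opcode2 5 */
	asm("mrc p15, 0, %0, c0, c0, 5" : "=r" (val));
	return val;
}
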
......@@ -25,13 +25,6 @@
movne r1, sp
adrne lr, BSYM(1b)
bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS
test_for_ltirq r0, r2, r6, lr
movne r0, sp
adrne lr, BSYM(1b)
bne do_local_timer
#endif
#endif
9997:
.endm
......
/*
* Annotations for marking C functions as exception handlers.
*
* These should only be used for C functions that are called from the low
* level exception entry code and not any intervening C code.
*/
#ifndef __ASM_ARM_EXCEPTION_H
#define __ASM_ARM_EXCEPTION_H
#include <linux/ftrace.h>
#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry __exception
#endif
#endif /* __ASM_ARM_EXCEPTION_H */
......@@ -9,9 +9,6 @@
typedef struct {
unsigned int __softirq_pending;
#ifdef CONFIG_LOCAL_TIMERS
unsigned int local_timer_irqs;
#endif
#ifdef CONFIG_SMP
unsigned int ipi_irqs[NR_IPI];
#endif
......
......@@ -22,15 +22,11 @@
* interrupt controller spec. To wit:
*
* Interrupts 0-15 are IPI
* 16-28 are reserved
* 29-31 are local. We allow 30 to be used for the watchdog.
* 16-31 are local. We allow 30 to be used for the watchdog.
* 32-1020 are global
* 1021-1022 are reserved
* 1023 is "spurious" (no interrupt)
*
* For now, we ignore all local interrupts so only return an interrupt if it's
* between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
*
* A simple read from the controller will tell us the number of the highest
* priority enabled interrupt. We then just need to check whether it is in the
valid range for an IRQ (30-1020 inclusive).
valid range for an IRQ (16-1020 inclusive).
......@@ -43,7 +39,7 @@
ldr \tmp, =1021
bic \irqnr, \irqstat, #0x1c00
cmp \irqnr, #29
cmp \irqnr, #15
cmpcc \irqnr, \irqnr
cmpne \irqnr, \tmp
cmpcs \irqnr, \irqnr
......@@ -62,14 +58,3 @@
strcc \irqstat, [\base, #GIC_CPU_EOI]
cmpcs \irqnr, \irqnr
.endm
/* As above, this assumes that irqstat and base are preserved.. */
.macro test_for_ltirq, irqnr, irqstat, base, tmp
bic \irqnr, \irqstat, #0x1c00
mov \tmp, #0
cmp \irqnr, #29
moveq \tmp, #1
streq \irqstat, [\base, #GIC_CPU_EOI]
cmp \tmp, #0
.endm
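
With test_for_ltirq gone, the decode above is the only filter on the ack value. Rendered as C (a sketch of the assembly, not code from this patch; handle_irq and handle_ipi stand in for the real dispatch):

unsigned int irqstat = readl(base + GIC_CPU_INTACK);
unsigned int irqnr = irqstat & ~0x1c00;	/* drop the SGI source-CPU field */

if (irqnr > 15 && irqnr < 1021)
	handle_irq(irqnr);		/* PPIs 16-31 now take the ordinary path */
else if (irqnr < 16)
	handle_ipi(irqnr, irqstat);	/* SGIs still go via test_for_ipi */
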
......@@ -40,7 +40,6 @@ void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
void gic_secondary_init(unsigned int);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
void gic_enable_ppi(unsigned int);
struct gic_chip_data {
unsigned int irq_offset;
......
......@@ -10,6 +10,8 @@
#ifndef __ASM_ARM_LOCALTIMER_H
#define __ASM_ARM_LOCALTIMER_H
#include <linux/interrupt.h>
struct clock_event_device;
/*
......@@ -17,27 +19,20 @@ struct clock_event_device;
*/
void percpu_timer_setup(void);
/*
* Called from assembly, this is the local timer IRQ handler
*/
asmlinkage void do_local_timer(struct pt_regs *);
#ifdef CONFIG_LOCAL_TIMERS
#ifdef CONFIG_HAVE_ARM_TWD
#include "smp_twd.h"
#define local_timer_ack() twd_timer_ack()
#define local_timer_stop(c) twd_timer_stop((c))
#else
/*
* Platform provides this to acknowledge a local timer IRQ.
* Returns true if the local timer IRQ is to be processed.
* Stop the local timer
*/
int local_timer_ack(void);
void local_timer_stop(struct clock_event_device *);
#endif
......@@ -52,6 +47,10 @@ static inline int local_timer_setup(struct clock_event_device *evt)
{
return -ENXIO;
}
static inline void local_timer_stop(struct clock_event_device *evt)
{
}
#endif
#endif
......@@ -32,6 +32,11 @@ extern void show_ipi_list(struct seq_file *, int);
*/
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
/*
* Called from C code, this handles an IPI.
*/
void handle_IPI(int ipinr, struct pt_regs *regs);
/*
* Setup the set of possible CPUs (via set_cpu_possible)
*/
......@@ -65,6 +70,12 @@ extern void platform_secondary_init(unsigned int cpu);
*/
extern void platform_smp_prepare_cpus(unsigned int);
/*
* Logical CPU mapping.
*/
extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
/*
* Initial data for bringing up a secondary CPU.
*/
......@@ -88,9 +99,4 @@ extern void platform_cpu_enable(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
/*
* show local interrupt info
*/
extern void show_local_irqs(struct seq_file *, int);
#endif /* ifndef __ASM_ARM_SMP_H */
......@@ -22,7 +22,7 @@ struct clock_event_device;
extern void __iomem *twd_base;
int twd_timer_ack(void);
void twd_timer_setup(struct clock_event_device *);
void twd_timer_stop(struct clock_event_device *);
#endif
......@@ -62,13 +62,6 @@
#include <asm/outercache.h>
#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry __exception
#endif
struct thread_info;
struct task_struct;
......
#ifndef _ASM_ARM_TOPOLOGY_H
#define _ASM_ARM_TOPOLOGY_H
#ifdef CONFIG_ARM_CPU_TOPOLOGY
#include <linux/cpumask.h>
struct cputopo_arm {
int thread_id;
int core_id;
int socket_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
};
extern struct cputopo_arm cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
#define mc_capable() (cpu_topology[0].socket_id != -1)
#define smt_capable() (cpu_topology[0].thread_id != -1)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
#else
static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
#endif
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
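
A short consumer-side sketch of the accessors above (dump_package is a hypothetical function; the macros and cpu_coregroup_mask are as declared in this header):

static void dump_package(unsigned int cpu)
{
	pr_info("CPU%u: package %d, core %d, %u core siblings\n",
		cpu, topology_physical_package_id(cpu),
		topology_core_id(cpu),
		cpumask_weight(topology_core_cpumask(cpu)));
}
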
......@@ -73,6 +73,7 @@ obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
ifneq ($(CONFIG_ARCH_EBSA110),y)
obj-y += io.o
......
......@@ -35,8 +35,8 @@
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/ftrace.h>
#include <asm/exception.h>
#include <asm/system.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
......@@ -58,9 +58,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
#endif
#ifdef CONFIG_SMP
show_ipi_list(p, prec);
#endif
#ifdef CONFIG_LOCAL_TIMERS
show_local_irqs(p, prec);
#endif
seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
return 0;
......
......@@ -16,7 +16,6 @@
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
......@@ -31,6 +30,8 @@
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
......@@ -39,6 +40,7 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
/*
* as from 2.5, kernels no longer have an init_tasks structure
......@@ -259,6 +261,20 @@ void __ref cpu_die(void)
}
#endif /* CONFIG_HOTPLUG_CPU */
int __cpu_logical_map[NR_CPUS];
void __init smp_setup_processor_id(void)
{
int i;
u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
cpu_logical_map(0) = cpu;
for (i = 1; i < NR_CPUS; ++i)
cpu_logical_map(i) = i == cpu ? 0 : i;
printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
}
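/*
 * Worked example (values assumed): on a 4-CPU system whose boot CPU
 * has MPIDR Aff0 == 2, the loop above yields
 *
 *   __cpu_logical_map[] = { 2, 1, 0, 3 }
 *
 * so logical CPU 0 is the booting physical CPU 2, physical CPU 0
 * takes the vacated logical slot 2, and the rest map one-to-one.
 */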
/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
......@@ -268,6 +284,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
cpu_info->loops_per_jiffy = loops_per_jiffy;
store_cpu_topology(cpuid);
}
/*
......@@ -358,6 +376,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int ncores = num_possible_cpus();
init_cpu_topology();
smp_store_cpu_info(smp_processor_id());
/*
......@@ -437,10 +457,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
for (i = 0; i < NR_IPI; i++)
sum += __get_irq_stat(cpu, ipi_irqs[i]);
#ifdef CONFIG_LOCAL_TIMERS
sum += __get_irq_stat(cpu, local_timer_irqs);
#endif
return sum;
}
......@@ -457,33 +473,6 @@ static void ipi_timer(void)
irq_exit();
}
#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
int cpu = smp_processor_id();
if (local_timer_ack()) {
__inc_irq_stat(cpu, local_timer_irqs);
ipi_timer();
}
set_irq_regs(old_regs);
}
void show_local_irqs(struct seq_file *p, int prec)
{
unsigned int cpu;
seq_printf(p, "%*s: ", prec, "LOC");
for_each_present_cpu(cpu)
seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
seq_printf(p, " Local timer interrupts\n");
}
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
......@@ -534,7 +523,7 @@ static void percpu_timer_stop(void)
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
local_timer_stop(evt);
}
#endif
......@@ -566,6 +555,11 @@ static void ipi_cpu_stop(unsigned int cpu)
* Main handler for inter-processor interrupts
*/
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
handle_IPI(ipinr, regs);
}
void handle_IPI(int ipinr, struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
......
......@@ -33,7 +33,7 @@ unsigned int __init scu_get_core_count(void __iomem *scu_base)
/*
* Enable the SCU
*/
void __init scu_enable(void __iomem *scu_base)
void scu_enable(void __iomem *scu_base)
{
u32 scu_ctrl;
......
......@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <asm/smp_twd.h>
#include <asm/localtimer.h>
#include <asm/hardware/gic.h>
/* set up by the platform code */
......@@ -26,6 +27,8 @@ void __iomem *twd_base;
static unsigned long twd_timer_rate;
static struct clock_event_device __percpu **twd_evt;
static void twd_set_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
{
......@@ -80,6 +83,12 @@ int twd_timer_ack(void)
return 0;
}
void twd_timer_stop(struct clock_event_device *clk)
{
twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
disable_percpu_irq(clk->irq);
}
static void __cpuinit twd_calibrate_rate(void)
{
unsigned long count;
......@@ -119,11 +128,43 @@ static void __cpuinit twd_calibrate_rate(void)
}
}
static irqreturn_t twd_handler(int irq, void *dev_id)
{
struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
if (twd_timer_ack()) {
evt->event_handler(evt);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/*
* Setup the local clock events for a CPU.
*/
void __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
struct clock_event_device **this_cpu_clk;
if (!twd_evt) {
int err;
twd_evt = alloc_percpu(struct clock_event_device *);
if (!twd_evt) {
pr_err("twd: can't allocate memory\n");
return;
}
err = request_percpu_irq(clk->irq, twd_handler,
"twd", twd_evt);
if (err) {
pr_err("twd: can't register interrupt %d (%d)\n",
clk->irq, err);
return;
}
}
twd_calibrate_rate();
clk->name = "local_timer";
......@@ -137,8 +178,10 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
this_cpu_clk = __this_cpu_ptr(twd_evt);
*this_cpu_clk = clk;
clockevents_register_device(clk);
/* Make sure our local interrupt controller has this enabled */
gic_enable_ppi(clk->irq);
enable_percpu_irq(clk->irq, 0);
}
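
The TWD conversion above follows the generic per-cpu IRQ pattern: a single request_percpu_irq() with a __percpu dev_id, then per-CPU enable/disable. A condensed sketch (every name except the four kernel APIs is hypothetical):

static struct clock_event_device __percpu **demo_evt;

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	/* dev_id is this CPU's slot of demo_evt, filled in at setup time */
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static int demo_register(unsigned int irq)
{
	demo_evt = alloc_percpu(struct clock_event_device *);
	if (!demo_evt)
		return -ENOMEM;

	/* one registration covers all CPUs... */
	return request_percpu_irq(irq, demo_handler, "demo", demo_evt);
}

/* ...but each CPU enables or disables its own copy */
static void demo_cpu_online(unsigned int irq)
{
	enable_percpu_irq(irq, 0);
}

static void demo_cpu_offline(unsigned int irq)
{
	disable_percpu_irq(irq);
}
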
/*
* arch/arm/kernel/topology.c
*
* Copyright (C) 2011 Linaro Limited.
* Written by: Vincent Guittot
*
* based on arch/sh/kernel/topology.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/topology.h>
#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)
#define MPIDR_MT_BITMASK (0x1 << 24)
/*
* These masks reflect the current use of the affinity levels.
* The affinity level can be up to 16 bits according to the ARM ARM
*/
#define MPIDR_LEVEL0_MASK 0x3
#define MPIDR_LEVEL0_SHIFT 0
#define MPIDR_LEVEL1_MASK 0xF
#define MPIDR_LEVEL1_SHIFT 8
#define MPIDR_LEVEL2_MASK 0xFF
#define MPIDR_LEVEL2_SHIFT 16
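/*
 * Worked example (value assumed): mpidr == 0x80000102 on a non-MT
 * part. Bits [31:30] == 0b10 match MPIDR_SMP_VALUE and the MT bit
 * is clear, so store_cpu_topology() below takes the
 * independent-cores branch:
 *
 *   core_id   = (0x80000102 >> MPIDR_LEVEL0_SHIFT) & MPIDR_LEVEL0_MASK = 2
 *   socket_id = (0x80000102 >> MPIDR_LEVEL1_SHIFT) & MPIDR_LEVEL1_MASK = 1
 */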
struct cputopo_arm cpu_topology[NR_CPUS];
const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
{
return &cpu_topology[cpu].core_sibling;
}
/*
* store_cpu_topology is called at boot when only one cpu is running
and with the cpu_hotplug.lock mutex held once several cpus have booted,
which prevents simultaneous write access to the cpu_topology array
*/
void store_cpu_topology(unsigned int cpuid)
{
struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
unsigned int mpidr;
unsigned int cpu;
/* If the cpu topology has already been set, just return */
if (cpuid_topo->core_id != -1)
return;
mpidr = read_cpuid_mpidr();
/* create cpu topology mapping */
if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
/*
This is a multiprocessor system: the
multiprocessor format and multiprocessor mode fields are set
*/
if (mpidr & MPIDR_MT_BITMASK) {
/* core performance interdependency */
cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
& MPIDR_LEVEL0_MASK;
cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
& MPIDR_LEVEL1_MASK;
cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
& MPIDR_LEVEL2_MASK;
} else {
/* largely independent cores */
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
& MPIDR_LEVEL0_MASK;
cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
& MPIDR_LEVEL1_MASK;
}
} else {
/*
This is a uniprocessor system: either we are in
multiprocessor format but running on a uniprocessor system,
or we are using the old uniprocessor format
*/
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = 0;
cpuid_topo->socket_id = -1;
}
/* update core and thread sibling masks */
for_each_possible_cpu(cpu) {
struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
if (cpuid_topo->socket_id == cpu_topo->socket_id) {
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
if (cpu != cpuid)
cpumask_set_cpu(cpu,
&cpuid_topo->core_sibling);
if (cpuid_topo->core_id == cpu_topo->core_id) {
cpumask_set_cpu(cpuid,
&cpu_topo->thread_sibling);
if (cpu != cpuid)
cpumask_set_cpu(cpu,
&cpuid_topo->thread_sibling);
}
}
}
smp_wmb();
printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
cpuid, cpu_topology[cpuid].thread_id,
cpu_topology[cpuid].core_id,
cpu_topology[cpuid].socket_id, mpidr);
}
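/*
 * Worked example (topology assumed): two dual-core sockets, no MT.
 * Once every CPU has run store_cpu_topology(), each CPU's
 * core_sibling mask holds the two CPUs of its socket, while each
 * thread_sibling mask holds only the CPU itself, since thread_id
 * is -1 throughout.
 */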
/*
* init_cpu_topology is called at boot when only one cpu is running
which prevents simultaneous write access to the cpu_topology array
*/
void init_cpu_topology(void)
{
unsigned int cpu;
/* init core mask */
for_each_possible_cpu(cpu) {
struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
cpu_topo->thread_id = -1;
cpu_topo->core_id = -1;
cpu_topo->socket_id = -1;
cpumask_clear(&cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
}
smp_wmb();
}
......@@ -27,6 +27,7 @@
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/traps.h>
......