cpu.c
    /* CPU control.
     * (C) 2001, 2002, 2003, 2004 Rusty Russell
     *
     * This code is licensed under the GPL.
     */
    #include <linux/proc_fs.h>
    #include <linux/smp.h>
    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <linux/sched.h>
    #include <linux/unistd.h>
    #include <linux/cpu.h>
    #include <linux/oom.h>
    #include <linux/rcupdate.h>
    #include <linux/export.h>
    #include <linux/bug.h>
    #include <linux/kthread.h>
    #include <linux/stop_machine.h>
    #include <linux/mutex.h>
    #include <linux/gfp.h>
    #include <linux/suspend.h>
    #include <linux/lockdep.h>
    #include <linux/tick.h>
    #include <linux/irq.h>
    
    #include <trace/events/power.h>
    #define CREATE_TRACE_POINTS
    #include <trace/events/cpuhp.h>
    
    #include "smpboot.h"
    
    /**
     * cpuhp_cpu_state - Per cpu hotplug state storage
     * @state:	The current cpu state
     * @target:	The target state
     */
    struct cpuhp_cpu_state {
    	enum cpuhp_state	state;
    	enum cpuhp_state	target;
    };
    
    static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
    
    /**
     * cpuhp_step - Hotplug state machine step
     * @name:	Name of the step
     * @startup:	Startup function of the step
     * @teardown:	Teardown function of the step
     * @skip_onerr:	Do not invoke the functions on error rollback
     *		Will go away once the notifiers	are gone
     */
    struct cpuhp_step {
    	const char	*name;
    	int		(*startup)(unsigned int cpu);
    	int		(*teardown)(unsigned int cpu);
    	bool		skip_onerr;
    };
    
    static struct cpuhp_step cpuhp_bp_states[];
    
    /**
     * cpuhp_invoke_callback - Invoke the callbacks for a given state
     * @cpu:	The cpu for which the callback should be invoked
     * @step:	The step in the state machine
     * @cb:		The callback function to invoke
     *
     * Called from cpu hotplug and from the state register machinery
     */
    static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
    				 int (*cb)(unsigned int))
    {
    	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    	int ret = 0;
    
    	if (cb) {
    		trace_cpuhp_enter(cpu, st->target, step, cb);
    		ret = cb(cpu);
    		trace_cpuhp_exit(cpu, st->state, step, ret);
    	}
    	return ret;
    }
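    
    /*
     * Illustration (not part of the original file): the state-machine loops
     * in _cpu_up()/_cpu_down() below invoke this helper as, e.g.,
     *
     *	ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
     *
     * so a step with a NULL callback (such as the "offline"/"online"
     * placeholder states) is simply skipped and treated as success.
     */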
    
    #ifdef CONFIG_SMP
    /* Serializes the updates to cpu_online_mask, cpu_present_mask */
    static DEFINE_MUTEX(cpu_add_remove_lock);
    bool cpuhp_tasks_frozen;
    EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
    
    /*
     * The following two APIs (cpu_maps_update_begin/done) must be used when
     * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
     * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
     * hotplug callback (un)registration performed using __register_cpu_notifier()
     * or __unregister_cpu_notifier().
     */
    void cpu_maps_update_begin(void)
    {
    	mutex_lock(&cpu_add_remove_lock);
    }
    EXPORT_SYMBOL(cpu_notifier_register_begin);
    
    void cpu_maps_update_done(void)
    {
    	mutex_unlock(&cpu_add_remove_lock);
    }
    EXPORT_SYMBOL(cpu_notifier_register_done);
    
    static RAW_NOTIFIER_HEAD(cpu_chain);
    
    /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
     * Should always be manipulated under cpu_add_remove_lock
     */
    static int cpu_hotplug_disabled;
    
    #ifdef CONFIG_HOTPLUG_CPU
    
    static struct {
    	struct task_struct *active_writer;
    	/* wait queue to wake up the active_writer */
    	wait_queue_head_t wq;
    	/* verifies that no writer will get active while readers are active */
    	struct mutex lock;
    	/*
    	 * Also blocks the new readers during
    	 * an ongoing cpu hotplug operation.
    	 */
    	atomic_t refcount;
    
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
    	struct lockdep_map dep_map;
    #endif
    } cpu_hotplug = {
    	.active_writer = NULL,
    	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
    	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
    	.dep_map = {.name = "cpu_hotplug.lock" },
    #endif
    };
    
    /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
    #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
    #define cpuhp_lock_acquire_tryread() \
    				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
    #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
    #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
    
    
    void get_online_cpus(void)
    {
    	might_sleep();
    	if (cpu_hotplug.active_writer == current)
    		return;
    	cpuhp_lock_acquire_read();
    	mutex_lock(&cpu_hotplug.lock);
    	atomic_inc(&cpu_hotplug.refcount);
    	mutex_unlock(&cpu_hotplug.lock);
    }
    EXPORT_SYMBOL_GPL(get_online_cpus);
    
    void put_online_cpus(void)
    {
    	int refcount;
    
    	if (cpu_hotplug.active_writer == current)
    		return;
    
    	refcount = atomic_dec_return(&cpu_hotplug.refcount);
    	if (WARN_ON(refcount < 0)) /* try to fix things up */
    		atomic_inc(&cpu_hotplug.refcount);
    
    	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
    		wake_up(&cpu_hotplug.wq);
    
    	cpuhp_lock_release();
    
    }
    EXPORT_SYMBOL_GPL(put_online_cpus);
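    
    /*
     * Reader-side usage sketch (illustration only, not part of this file;
     * do_work_on() is a hypothetical helper). Holding the reference keeps
     * the set of online CPUs stable across the loop:
     *
     *	get_online_cpus();
     *	for_each_online_cpu(cpu)
     *		do_work_on(cpu);
     *	put_online_cpus();
     */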
    
    /*
     * This ensures that the hotplug operation can begin only when the
     * refcount goes to zero.
     *
     * Note that during a cpu-hotplug operation, the new readers, if any,
     * will be blocked by the cpu_hotplug.lock
     *
     * Since cpu_hotplug_begin() is always called after invoking
     * cpu_maps_update_begin(), we can be sure that only one writer is active.
     *
     * Note that theoretically, there is a possibility of a livelock:
     * - Refcount goes to zero, last reader wakes up the sleeping
     *   writer.
     * - Last reader unlocks the cpu_hotplug.lock.
     * - A new reader arrives at this moment, bumps up the refcount.
     * - The writer acquires the cpu_hotplug.lock, finds the refcount
     *   non-zero and goes to sleep again.
     *
     * However, this is very difficult to achieve in practice since
     * get_online_cpus() is not an API that is called all that often.
     *
     */
    void cpu_hotplug_begin(void)
    {
    	DEFINE_WAIT(wait);
    
    	cpu_hotplug.active_writer = current;
    	cpuhp_lock_acquire();
    
    	for (;;) {
    		mutex_lock(&cpu_hotplug.lock);
    		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
    		if (likely(!atomic_read(&cpu_hotplug.refcount)))
    				break;
    		mutex_unlock(&cpu_hotplug.lock);
    		schedule();
    	}
    	finish_wait(&cpu_hotplug.wq, &wait);
    }
    
    void cpu_hotplug_done(void)
    {
    	cpu_hotplug.active_writer = NULL;
    	mutex_unlock(&cpu_hotplug.lock);
    	cpuhp_lock_release();
    }
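    
    /*
     * Writer-side sketch, mirroring what _cpu_up()/_cpu_down() below do
     * (illustration only): a writer serializes against other writers with
     * cpu_maps_update_begin() and waits out the readers with
     * cpu_hotplug_begin():
     *
     *	cpu_maps_update_begin();
     *	cpu_hotplug_begin();
     *	... change cpu_online_mask ...
     *	cpu_hotplug_done();
     *	cpu_maps_update_done();
     */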
    
    /*
     * Wait for currently running CPU hotplug operations to complete (if any) and
     * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
     * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
     * hotplug path before performing hotplug operations. So acquiring that lock
     * guarantees mutual exclusion from any currently running hotplug operations.
     */
    void cpu_hotplug_disable(void)
    {
    	cpu_maps_update_begin();
    	cpu_hotplug_disabled++;
    	cpu_maps_update_done();
    }
    EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
    
    void cpu_hotplug_enable(void)
    {
    	cpu_maps_update_begin();
    	WARN_ON(--cpu_hotplug_disabled < 0);
    	cpu_maps_update_done();
    }
    EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
    #endif	/* CONFIG_HOTPLUG_CPU */
    
    /* Need to know about CPUs going up/down? */
    int register_cpu_notifier(struct notifier_block *nb)
    {
    	int ret;
    	cpu_maps_update_begin();
    	ret = raw_notifier_chain_register(&cpu_chain, nb);
    	cpu_maps_update_done();
    	return ret;
    }
    
    int __register_cpu_notifier(struct notifier_block *nb)
    {
    	return raw_notifier_chain_register(&cpu_chain, nb);
    }
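    
    /*
     * Usage sketch for the notifier interface (illustration only; the
     * callback and notifier_block names are hypothetical, the real in-file
     * user is smpboot_thread_notifier further down):
     *
     *	static int my_cpu_callback(struct notifier_block *nb,
     *				   unsigned long action, void *hcpu)
     *	{
     *		int cpu = (long)hcpu;
     *
     *		switch (action & ~CPU_TASKS_FROZEN) {
     *		case CPU_ONLINE:
     *			... per-CPU setup for cpu ...
     *			break;
     *		case CPU_DEAD:
     *			... per-CPU teardown for cpu ...
     *			break;
     *		}
     *		return NOTIFY_OK;
     *	}
     *
     *	static struct notifier_block my_cpu_nb = {
     *		.notifier_call = my_cpu_callback,
     *	};
     *
     *	register_cpu_notifier(&my_cpu_nb);
     */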
    
    static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
    			int *nr_calls)
    {
    	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
    	void *hcpu = (void *)(long)cpu;
    
    	int ret;
    
    	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
    					nr_calls);
    
    	return notifier_to_errno(ret);
    }
    
    static int cpu_notify(unsigned long val, unsigned int cpu)
    {
    	return __cpu_notify(val, cpu, -1, NULL);
    }
    
    /* Notifier wrappers for transitioning to state machine */
    static int notify_prepare(unsigned int cpu)
    {
    	int nr_calls = 0;
    	int ret;
    
    	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
    	if (ret) {
    		nr_calls--;
    		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
    				__func__, cpu);
    		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
    	}
    	return ret;
    }
    
    static int notify_online(unsigned int cpu)
    {
    	cpu_notify(CPU_ONLINE, cpu);
    	return 0;
    }
    
    static int bringup_cpu(unsigned int cpu)
    {
    	struct task_struct *idle = idle_thread_get(cpu);
    	int ret;
    
    	/* Arch-specific enabling code. */
    	ret = __cpu_up(cpu, idle);
    	if (ret) {
    		cpu_notify(CPU_UP_CANCELED, cpu);
    		return ret;
    	}
    	BUG_ON(!cpu_online(cpu));
    	return 0;
    }
    
    #ifdef CONFIG_HOTPLUG_CPU
    EXPORT_SYMBOL(register_cpu_notifier);
    EXPORT_SYMBOL(__register_cpu_notifier);
    
    void unregister_cpu_notifier(struct notifier_block *nb)
    {
    	cpu_maps_update_begin();
    	raw_notifier_chain_unregister(&cpu_chain, nb);
    	cpu_maps_update_done();
    }
    EXPORT_SYMBOL(unregister_cpu_notifier);
    
    void __unregister_cpu_notifier(struct notifier_block *nb)
    {
    	raw_notifier_chain_unregister(&cpu_chain, nb);
    }
    EXPORT_SYMBOL(__unregister_cpu_notifier);
    
    /**
     * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
     * @cpu: a CPU id
     *
     * This function walks all processes, finds a valid mm struct for each one and
     * then clears a corresponding bit in mm's cpumask.  While this all sounds
     * trivial, there are various non-obvious corner cases, which this function
     * tries to solve in a safe manner.
     *
     * Also note that the function uses a somewhat relaxed locking scheme, so it may
     * be called only for an already offlined CPU.
     */
    void clear_tasks_mm_cpumask(int cpu)
    {
    	struct task_struct *p;
    
    	/*
    	 * This function is called after the cpu is taken down and marked
    	 * offline, so it's not like new tasks will ever get this cpu set in
    	 * their mm mask. -- Peter Zijlstra
    	 * Thus, we may use rcu_read_lock() here, instead of grabbing
    	 * full-fledged tasklist_lock.
    	 */
    	WARN_ON(cpu_online(cpu));
    	rcu_read_lock();
    	for_each_process(p) {
    		struct task_struct *t;
    
    		/*
    		 * Main thread might exit, but other threads may still have
    		 * a valid mm. Find one.
    		 */
    		t = find_lock_task_mm(p);
    		if (!t)
    			continue;
    		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
    		task_unlock(t);
    	}
    	rcu_read_unlock();
    }
    
    static inline void check_for_tasks(int dead_cpu)
    {
    	struct task_struct *g, *p;
    
    	read_lock(&tasklist_lock);
    	for_each_process_thread(g, p) {
    		if (!p->on_rq)
    			continue;
    		/*
    		 * We do the check with task_rq(p)->lock unlocked.
    		 * Order the reads so that we do not warn about a task
    		 * which was running on this cpu in the past and has
    		 * just been woken on another cpu.
    		 */
    		rmb();
    		if (task_cpu(p) != dead_cpu)
    			continue;
    
    		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
    			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
    	}
    	read_unlock(&tasklist_lock);
    }
    
    static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
    {
    	BUG_ON(cpu_notify(val, cpu));
    }
    
    static int notify_down_prepare(unsigned int cpu)
    {
    	int err, nr_calls = 0;
    
    	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
    	if (err) {
    		nr_calls--;
    		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
    		pr_warn("%s: attempt to take down CPU %u failed\n",
    				__func__, cpu);
    	}
    	return err;
    }
    
    /* Take this CPU down. */
    static int take_cpu_down(void *_param)
    {
    	int err, cpu = smp_processor_id();
    
    	/* Ensure this CPU doesn't handle any more interrupts. */
    	err = __cpu_disable();
    	if (err < 0)
    		return err;
    
    	cpu_notify(CPU_DYING, cpu);
    	/* Give up timekeeping duties */
    	tick_handover_do_timer();
    	/* Park the stopper thread */
    	stop_machine_park(cpu);
    	return 0;
    }
    
    static int takedown_cpu(unsigned int cpu)
    {
    	int err;
    
    	/*
    	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
    	 * and RCU users of this state to go away such that all new such users
    	 * will observe it.
    	 *
    	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
    	 * not imply sync_sched(), so wait for both.
    	 *
    	 * Do the sync before parking the smpboot threads to take care of
    	 * the RCU boost case.
    	 */
    	if (IS_ENABLED(CONFIG_PREEMPT))
    		synchronize_rcu_mult(call_rcu, call_rcu_sched);
    	else
    		synchronize_rcu();
    
    	smpboot_park_threads(cpu);
    
    	/*
    	 * Prevent irq alloc/free while the dying cpu reorganizes the
    	 * interrupt affinities.
    	 */
    	irq_lock_sparse();
    
    	/*
    	 * So now all preempt/rcu users must observe !cpu_active().
    	 */
    	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
    	if (err) {
    		/* CPU didn't die: tell everyone.  Can't complain. */
    		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
    		irq_unlock_sparse();
    		return err;
    	}
    	BUG_ON(cpu_online(cpu));
    
    	/*
    	 * The migration_call() CPU_DYING callback will have removed all
    	 * runnable tasks from the cpu, there's only the idle task left now
    	 * that the migration thread is done doing the stop_machine thing.
    	 *
    	 * Wait for the stop thread to go away.
    	 */
    	while (!per_cpu(cpu_dead_idle, cpu))
    		cpu_relax();
    	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
    	per_cpu(cpu_dead_idle, cpu) = false;
    
    	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
    	irq_unlock_sparse();
    
    	hotplug_cpu__broadcast_tick_pull(cpu);
    	/* This actually kills the CPU. */
    	__cpu_die(cpu);
    
    	tick_cleanup_dead_cpu(cpu);
    	return 0;
    }
    
    static int notify_dead(unsigned int cpu)
    {
    	cpu_notify_nofail(CPU_DEAD, cpu);
    	check_for_tasks(cpu);
    	return 0;
    }
    
    #else
    #define notify_down_prepare	NULL
    #define takedown_cpu		NULL
    #define notify_dead		NULL
    #endif
    
    #ifdef CONFIG_HOTPLUG_CPU
    static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
    {
    	for (st->state++; st->state < st->target; st->state++) {
    		struct cpuhp_step *step = cpuhp_bp_states + st->state;
    
    		if (!step->skip_onerr)
    			cpuhp_invoke_callback(cpu, st->state, step->startup);
    	}
    }
    
    /* Requires cpu_add_remove_lock to be held */
    static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
    {
    	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    	int prev_state, ret = 0;
    	bool hasdied = false;
    
    	if (num_online_cpus() == 1)
    		return -EBUSY;
    
    	if (!cpu_online(cpu))
    		return -EINVAL;
    
    	cpu_hotplug_begin();
    
    	cpuhp_tasks_frozen = tasks_frozen;
    
    	prev_state = st->state;
    	st->target = CPUHP_OFFLINE;
    	for (; st->state > st->target; st->state--) {
    		struct cpuhp_step *step = cpuhp_bp_states + st->state;
    
    		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
    		if (ret) {
    			st->target = prev_state;
    			undo_cpu_down(cpu, st);
    			break;
    		}
    	}
    	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
    
    	cpu_hotplug_done();
    	/* This post dead nonsense must die */
    	if (!ret && hasdied)
    		cpu_notify_nofail(CPU_POST_DEAD, cpu);
    	return ret;
    }
    
    int cpu_down(unsigned int cpu)
    {
    	int err;
    
    	cpu_maps_update_begin();
    
    	if (cpu_hotplug_disabled) {
    		err = -EBUSY;
    		goto out;
    	}
    
    	err = _cpu_down(cpu, 0);
    
    out:
    	cpu_maps_update_done();
    	return err;
    }
    EXPORT_SYMBOL(cpu_down);
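    
    /*
     * Illustration (assumed call path, not part of this file): writing 0 to
     * /sys/devices/system/cpu/cpuN/online ends up here via the cpu subsystem,
     * e.g.
     *
     *	err = cpu_down(3);
     *	if (err)
     *		pr_err("failed to offline CPU3: %d\n", err);
     */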
    #endif /*CONFIG_HOTPLUG_CPU*/
    
    /*
     * Unpark per-CPU smpboot kthreads at CPU-online time.
     */
    static int smpboot_thread_call(struct notifier_block *nfb,
    			       unsigned long action, void *hcpu)
    {
    	int cpu = (long)hcpu;
    
    	switch (action & ~CPU_TASKS_FROZEN) {
    
    	case CPU_DOWN_FAILED:
    	case CPU_ONLINE:
    		smpboot_unpark_threads(cpu);
    		break;
    
    	default:
    		break;
    	}
    
    	return NOTIFY_OK;
    }
    
    static struct notifier_block smpboot_thread_notifier = {
    	.notifier_call = smpboot_thread_call,
    	.priority = CPU_PRI_SMPBOOT,
    };
    
    void smpboot_thread_init(void)
    {
    	register_cpu_notifier(&smpboot_thread_notifier);
    }
    
    static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
    {
    	for (st->state--; st->state > st->target; st->state--) {
    		struct cpuhp_step *step = cpuhp_bp_states + st->state;
    
    		if (!step->skip_onerr)
    			cpuhp_invoke_callback(cpu, st->state, step->teardown);
    	}
    }
    
    /* Requires cpu_add_remove_lock to be held */
    static int _cpu_up(unsigned int cpu, int tasks_frozen)
    {
    	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    	struct task_struct *idle;
    	int prev_state, ret = 0;
    
    	cpu_hotplug_begin();
    
    	if (cpu_online(cpu) || !cpu_present(cpu)) {
    		ret = -EINVAL;
    		goto out;
    	}
    
    	/* Let it fail before we try to bring the cpu up */
    	idle = idle_thread_get(cpu);
    	if (IS_ERR(idle)) {
    		ret = PTR_ERR(idle);
    		goto out;
    	}
    
    	cpuhp_tasks_frozen = tasks_frozen;
    
    	prev_state = st->state;
    	st->target = CPUHP_ONLINE;
    	while (st->state < st->target) {
    		struct cpuhp_step *step;
    
    		st->state++;
    		step = cpuhp_bp_states + st->state;
    		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
    		if (ret) {
    			st->target = prev_state;
    			undo_cpu_up(cpu, st);
    			break;
    		}
    	}
    out:
    	cpu_hotplug_done();
    	return ret;
    }
    
    int cpu_up(unsigned int cpu)
    {
    	int err = 0;
    
    	if (!cpu_possible(cpu)) {
    		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
    		       cpu);
    #if defined(CONFIG_IA64)
    		pr_err("please check additional_cpus= boot parameter\n");
    #endif
    		return -EINVAL;
    	}
    
    	err = try_online_node(cpu_to_node(cpu));
    	if (err)
    		return err;
    
    	cpu_maps_update_begin();
    
    	if (cpu_hotplug_disabled) {
    		err = -EBUSY;
    		goto out;
    	}
    
    	err = _cpu_up(cpu, 0);
    
    out:
    	cpu_maps_update_done();
    	return err;
    }
    EXPORT_SYMBOL_GPL(cpu_up);
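    
    /*
     * Illustration (assumed call paths, not part of this file): writing 1 to
     * /sys/devices/system/cpu/cpuN/online reaches cpu_up() at runtime, and
     * boot-time bringup is roughly what smp_init() does:
     *
     *	for_each_present_cpu(cpu) {
     *		if (!cpu_online(cpu))
     *			cpu_up(cpu);
     *	}
     */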
    
    #ifdef CONFIG_PM_SLEEP_SMP
    static cpumask_var_t frozen_cpus;
    
    int disable_nonboot_cpus(void)
    {
    	int cpu, first_cpu, error = 0;
    
    	cpu_maps_update_begin();
    	first_cpu = cpumask_first(cpu_online_mask);
    	/*
    	 * We take down all of the non-boot CPUs in one shot to avoid races
    	 * with the userspace trying to use the CPU hotplug at the same time
    	 */
    	cpumask_clear(frozen_cpus);
    
    	pr_info("Disabling non-boot CPUs ...\n");
    	for_each_online_cpu(cpu) {
    		if (cpu == first_cpu)
    			continue;
    		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
    		error = _cpu_down(cpu, 1);
    		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
    		if (!error)
    			cpumask_set_cpu(cpu, frozen_cpus);
    		else {
    			pr_err("Error taking CPU%d down: %d\n", cpu, error);
    			break;
    		}
    	}
    
    	if (!error)
    		BUG_ON(num_online_cpus() > 1);
    	else
    		pr_err("Non-boot CPUs are not disabled\n");
    
    	/*
    	 * Make sure the CPUs won't be enabled by someone else. We need to do
    	 * this even in case of failure as all disable_nonboot_cpus() users are
    	 * supposed to do enable_nonboot_cpus() on the failure path.
    	 */
    	cpu_hotplug_disabled++;
    
    	cpu_maps_update_done();
    	return error;
    }
    
    void __weak arch_enable_nonboot_cpus_begin(void)
    {
    }
    
    void __weak arch_enable_nonboot_cpus_end(void)
    {
    }
    
    void enable_nonboot_cpus(void)
    {
    	int cpu, error;
    
    	/* Allow everyone to use the CPU hotplug again */
    	cpu_maps_update_begin();
    	WARN_ON(--cpu_hotplug_disabled < 0);
    	if (cpumask_empty(frozen_cpus))
    		goto out;
    
    	pr_info("Enabling non-boot CPUs ...\n");
    
    	arch_enable_nonboot_cpus_begin();
    
    	for_each_cpu(cpu, frozen_cpus) {
    		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
    		error = _cpu_up(cpu, 1);
    		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
    		if (!error) {
    			pr_info("CPU%d is up\n", cpu);
    			continue;
    		}
    		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
    	}
    
    	arch_enable_nonboot_cpus_end();
    
    	cpumask_clear(frozen_cpus);
    out:
    	cpu_maps_update_done();
    }
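    
    /*
     * Sketch of the intended pairing on the suspend path (illustration
     * only): enable_nonboot_cpus() must also run on the failure path, as
     * the comment in disable_nonboot_cpus() above notes:
     *
     *	error = disable_nonboot_cpus();
     *	if (!error)
     *		... enter the sleep state ...
     *	enable_nonboot_cpus();
     */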
    
    static int __init alloc_frozen_cpus(void)
    {
    	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
    		return -ENOMEM;
    	return 0;
    }
    core_initcall(alloc_frozen_cpus);
    
    /*
     * When callbacks for CPU hotplug notifications are being executed, we must
     * ensure that the state of the system with respect to the tasks being frozen
     * or not, as reported by the notification, remains unchanged *throughout the
     * duration* of the execution of the callbacks.
     * Hence we need to prevent the freezer from racing with regular CPU hotplug.
     *
     * This synchronization is implemented by mutually excluding regular CPU
     * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
     * Hibernate notifications.
     */
    static int
    cpu_hotplug_pm_callback(struct notifier_block *nb,
    			unsigned long action, void *ptr)
    {
    	switch (action) {
    
    	case PM_SUSPEND_PREPARE:
    	case PM_HIBERNATION_PREPARE:
    		cpu_hotplug_disable();
    		break;
    
    	case PM_POST_SUSPEND:
    	case PM_POST_HIBERNATION:
    		cpu_hotplug_enable();
    		break;
    
    	default:
    		return NOTIFY_DONE;
    	}
    
    	return NOTIFY_OK;
    }
    
    
    static int __init cpu_hotplug_pm_sync_init(void)
    {
    	/*
    	 * cpu_hotplug_pm_callback has higher priority than x86
    	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
    	 * to disable cpu hotplug to avoid cpu hotplug race.
    	 */
    	pm_notifier(cpu_hotplug_pm_callback, 0);
    	return 0;
    }
    core_initcall(cpu_hotplug_pm_sync_init);
    
    #endif /* CONFIG_PM_SLEEP_SMP */
    
    /**
     * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
     * @cpu: cpu that just started
     *
     * This function calls the cpu_chain notifiers with CPU_STARTING.
     * It must be called by the arch code on the new cpu, before the new cpu
     * enables interrupts and before the "boot" cpu returns from __cpu_up().
     */
    void notify_cpu_starting(unsigned int cpu)
    {
    	cpu_notify(CPU_STARTING, cpu);
    }
    
    #endif /* CONFIG_SMP */
    
    /* Boot processor state steps */
    static struct cpuhp_step cpuhp_bp_states[] = {
    	[CPUHP_OFFLINE] = {
    		.name			= "offline",
    		.startup		= NULL,
    		.teardown		= NULL,
    	},
    #ifdef CONFIG_SMP
    	[CPUHP_CREATE_THREADS] = {
    		.name			= "threads:create",
    		.startup		= smpboot_create_threads,
    		.teardown		= NULL,
    	},
    	[CPUHP_NOTIFY_PREPARE] = {
    		.name			= "notify:prepare",
    		.startup		= notify_prepare,
    		.teardown		= notify_dead,
    		.skip_onerr		= true,
    	},
    	[CPUHP_BRINGUP_CPU] = {
    		.name			= "cpu:bringup",
    		.startup		= bringup_cpu,
    		.teardown		= takedown_cpu,
    		.skip_onerr		= true,
    	},
    	[CPUHP_NOTIFY_ONLINE] = {
    		.name			= "notify:online",
    		.startup		= notify_online,
    		.teardown		= notify_down_prepare,
    	},
    #endif
    	[CPUHP_ONLINE] = {
    		.name			= "online",
    		.startup		= NULL,
    		.teardown		= NULL,
    	},
    };
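    
    /*
     * Walk-through (illustration only): for a CPU at CPUHP_OFFLINE,
     * _cpu_up() advances st->state one entry at a time and runs each
     * step's ->startup callback in table order: smpboot_create_threads(),
     * notify_prepare(), bringup_cpu(), notify_online(). _cpu_down() runs
     * the ->teardown callbacks in the reverse order: notify_down_prepare(),
     * takedown_cpu(), notify_dead().
     */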
    
    /*
     * cpu_bit_bitmap[] is a special, "compressed" data structure that
     * represents all NR_CPUS bits binary values of 1<<nr.
     *
     * It is used by cpumask_of() to get a constant address to a CPU
     * mask value that has a single bit set only.
     */
    
    /* cpu_bit_bitmap[0] is empty - so we can back into it */
    #define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
    #define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
    #define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
    #define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
    
    const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
    
    	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
    	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
    #if BITS_PER_LONG > 32
    	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
    	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
    #endif
    };
    EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
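    
    /*
     * Worked example (based on get_cpu_mask() in <linux/cpumask.h>, assuming
     * BITS_PER_LONG == 64 and NR_CPUS spanning more than one long): for
     * cpumask_of(70), the row is cpu_bit_bitmap[1 + 70 % 64], whose first
     * long is 1UL << 6; the pointer is then backed up by 70 / 64 == 1 long
     * into the all-zero tail of the previous row, so the words read for the
     * mask are { 0, 1UL << 6, 0, ... }, i.e. only bit 70 is set.
     */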
    
    const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
    EXPORT_SYMBOL(cpu_all_bits);
    
    #ifdef CONFIG_INIT_ALL_POSSIBLE
    struct cpumask __cpu_possible_mask __read_mostly
    	= {CPU_BITS_ALL};
    #else
    struct cpumask __cpu_possible_mask __read_mostly;
    #endif
    EXPORT_SYMBOL(__cpu_possible_mask);
    
    struct cpumask __cpu_online_mask __read_mostly;
    EXPORT_SYMBOL(__cpu_online_mask);
    
    struct cpumask __cpu_present_mask __read_mostly;
    EXPORT_SYMBOL(__cpu_present_mask);
    
    struct cpumask __cpu_active_mask __read_mostly;
    EXPORT_SYMBOL(__cpu_active_mask);
    
    void init_cpu_present(const struct cpumask *src)
    {
    	cpumask_copy(&__cpu_present_mask, src);
    }
    
    void init_cpu_possible(const struct cpumask *src)
    {
    	cpumask_copy(&__cpu_possible_mask, src);
    }
    
    void init_cpu_online(const struct cpumask *src)
    {
    	cpumask_copy(&__cpu_online_mask, src);
    }
    
    /*
     * Activate the first processor.
     */
    void __init boot_cpu_init(void)
    {
    	int cpu = smp_processor_id();
    
    	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
    	set_cpu_online(cpu, true);
    	set_cpu_active(cpu, true);
    	set_cpu_present(cpu, true);
    	set_cpu_possible(cpu, true);
    }
    
    /*
     * Must be called _AFTER_ setting up the per_cpu areas
     */
    void __init boot_cpu_state_init(void)
    {
    	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
    }