Commit 05c2d088
Merge branch 'this_cpu_ops' into for-2.6.38
Authored Dec 18, 2010 by Tejun Heo
Parents: 3ea9f683, 8270137a
Showing 3 changed files with 316 additions and 68 deletions:

  arch/x86/Kconfig.cpu            +3    −0
  arch/x86/include/asm/percpu.h   +150  −37
  include/linux/percpu.h          +163  −31
arch/x86/Kconfig.cpu  (+3, −0)
@@ -310,6 +310,9 @@ config X86_INTERNODE_CACHE_SHIFT
 config X86_CMPXCHG
 	def_bool X86_64 || (X86_32 && !M386)
 
+config CMPXCHG_LOCAL
+	def_bool X86_64 || (X86_32 && !M386)
+
 config X86_L1_CACHE_SHIFT
 	int
 	default "7" if MPENTIUM4 || MPSC
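Side note (not part of the commit): the new CMPXCHG_LOCAL symbol gives other kernel code a build-time switch for cmpxchg-based local fast paths. A minimal, hypothetical sketch of such gating follows; the demo_* names and the spinlock fallback are invented for illustration, and only CONFIG_CMPXCHG_LOCAL and the kernel's cmpxchg_local() helper (provided by the x86 cmpxchg headers) are assumed to exist.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_counter;

static void demo_set_if_unchanged(unsigned long old, unsigned long newval)
{
#ifdef CONFIG_CMPXCHG_LOCAL
	/* cmpxchg without the LOCK prefix is usable on this CPU family */
	cmpxchg_local(&demo_counter, old, newval);
#else
	/* 386-class CPUs lack cmpxchg: fall back to a locked compare-and-set */
	spin_lock(&demo_lock);
	if (demo_counter == old)
		demo_counter = newval;
	spin_unlock(&demo_lock);
#endif
}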
arch/x86/include/asm/percpu.h  (+150, −37)
@@ -177,6 +177,58 @@ do { \
 	}						\
 } while (0)
 
+#define percpu_from_op(op, var, constraint)		\
+({							\
+	typeof(var) pfo_ret__;				\
+	switch (sizeof(var)) {				\
+	case 1:						\
+		asm(op "b "__percpu_arg(1)",%0"		\
+		    : "=q" (pfo_ret__)			\
+		    : constraint);			\
+		break;					\
+	case 2:						\
+		asm(op "w "__percpu_arg(1)",%0"		\
+		    : "=r" (pfo_ret__)			\
+		    : constraint);			\
+		break;					\
+	case 4:						\
+		asm(op "l "__percpu_arg(1)",%0"		\
+		    : "=r" (pfo_ret__)			\
+		    : constraint);			\
+		break;					\
+	case 8:						\
+		asm(op "q "__percpu_arg(1)",%0"		\
+		    : "=r" (pfo_ret__)			\
+		    : constraint);			\
+		break;					\
+	default: __bad_percpu_size();			\
+	}						\
+	pfo_ret__;					\
+})
+
+#define percpu_unary_op(op, var)			\
+({							\
+	switch (sizeof(var)) {				\
+	case 1:						\
+		asm(op "b "__percpu_arg(0)		\
+		    : "+m" (var));			\
+		break;					\
+	case 2:						\
+		asm(op "w "__percpu_arg(0)		\
+		    : "+m" (var));			\
+		break;					\
+	case 4:						\
+		asm(op "l "__percpu_arg(0)		\
+		    : "+m" (var));			\
+		break;					\
+	case 8:						\
+		asm(op "q "__percpu_arg(0)		\
+		    : "+m" (var));			\
+		break;					\
+	default: __bad_percpu_size();			\
+	}						\
+})
+
 /*
  * Add return operation
  */
@@ -210,56 +262,90 @@ do { \
 	paro_ret__;					\
 })
 
-#define percpu_from_op(op, var, constraint)		\
+/*
+ * xchg is implemented using cmpxchg without a lock prefix. xchg is
+ * expensive due to the implied lock prefix.  The processor cannot prefetch
+ * cachelines if xchg is used.
+ */
+#define percpu_xchg_op(var, nval)			\
 ({							\
-	typeof(var) pfo_ret__;				\
+	typeof(var) pxo_ret__;				\
+	typeof(var) pxo_new__ = (nval);			\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b "__percpu_arg(1)",%0"		\
-		    : "=q" (pfo_ret__)			\
-		    : constraint);			\
+		asm("\n1:mov "__percpu_arg(1)",%%al"	\
+		    "\n\tcmpxchgb %2, "__percpu_arg(1)	\
+		    "\n\tjnz 1b"			\
+		    : "=a" (pxo_ret__), "+m" (var)	\
+		    : "q" (pxo_new__)			\
+		    : "memory");			\
 		break;					\
 	case 2:						\
-		asm(op "w "__percpu_arg(1)",%0"		\
-		    : "=r" (pfo_ret__)			\
-		    : constraint);			\
+		asm("\n1:mov "__percpu_arg(1)",%%ax"	\
+		    "\n\tcmpxchgw %2, "__percpu_arg(1)	\
+		    "\n\tjnz 1b"			\
+		    : "=a" (pxo_ret__), "+m" (var)	\
+		    : "r" (pxo_new__)			\
+		    : "memory");			\
 		break;					\
 	case 4:						\
-		asm(op "l "__percpu_arg(1)",%0"		\
-		    : "=r" (pfo_ret__)			\
-		    : constraint);			\
+		asm("\n1:mov "__percpu_arg(1)",%%eax"	\
+		    "\n\tcmpxchgl %2, "__percpu_arg(1)	\
+		    "\n\tjnz 1b"			\
+		    : "=a" (pxo_ret__), "+m" (var)	\
+		    : "r" (pxo_new__)			\
+		    : "memory");			\
 		break;					\
 	case 8:						\
-		asm(op "q "__percpu_arg(1)",%0"		\
-		    : "=r" (pfo_ret__)			\
-		    : constraint);			\
+		asm("\n1:mov "__percpu_arg(1)",%%rax"	\
+		    "\n\tcmpxchgq %2, "__percpu_arg(1)	\
+		    "\n\tjnz 1b"			\
+		    : "=a" (pxo_ret__), "+m" (var)	\
+		    : "r" (pxo_new__)			\
+		    : "memory");			\
 		break;					\
 	default: __bad_percpu_size();			\
 	}						\
-	pfo_ret__;					\
+	pxo_ret__;					\
 })
 
-#define percpu_unary_op(op, var)			\
+/*
+ * cmpxchg has no such implied lock semantics as a result it is much
+ * more efficient for cpu local operations.
+ */
+#define percpu_cmpxchg_op(var, oval, nval)		\
 ({							\
+	typeof(var) pco_ret__;				\
+	typeof(var) pco_old__ = (oval);			\
+	typeof(var) pco_new__ = (nval);			\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b "__percpu_arg(0)		\
-		    : "+m" (var));			\
+		asm("cmpxchgb %2, "__percpu_arg(1)	\
+		    : "=a" (pco_ret__), "+m" (var)	\
+		    : "q" (pco_new__), "0" (pco_old__)	\
+		    : "memory");			\
 		break;					\
 	case 2:						\
-		asm(op "w "__percpu_arg(0)		\
-		    : "+m" (var));			\
+		asm("cmpxchgw %2, "__percpu_arg(1)	\
+		    : "=a" (pco_ret__), "+m" (var)	\
+		    : "r" (pco_new__), "0" (pco_old__)	\
+		    : "memory");			\
 		break;					\
 	case 4:						\
-		asm(op "l "__percpu_arg(0)		\
-		    : "+m" (var));			\
+		asm("cmpxchgl %2, "__percpu_arg(1)	\
+		    : "=a" (pco_ret__), "+m" (var)	\
+		    : "r" (pco_new__), "0" (pco_old__)	\
+		    : "memory");			\
 		break;					\
 	case 8:						\
-		asm(op "q "__percpu_arg(0)		\
-		    : "+m" (var));			\
+		asm("cmpxchgq %2, "__percpu_arg(1)	\
+		    : "=a" (pco_ret__), "+m" (var)	\
+		    : "r" (pco_new__), "0" (pco_old__)	\
+		    : "memory");			\
 		break;					\
 	default: __bad_percpu_size();			\
 	}						\
+	pco_ret__;					\
 })
 
 /*
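The comments inside the hunk above carry the key point: for data that only the local CPU touches, a cmpxchg retry loop gives xchg semantics without the LOCK prefix that a real xchg instruction implies. A rough C-level sketch of the same loop, for illustration only (not the macro expansion); local_xchg_sketch is an invented name, and cmpxchg_local() is the kernel's unlocked cmpxchg helper:

static inline unsigned long local_xchg_sketch(unsigned long *slot,
					      unsigned long newval)
{
	unsigned long old;

	/* mirrors the "1: mov ...; cmpxchg ...; jnz 1b" sequence above:
	 * reread and retry until the slot still holds the value we saw */
	do {
		old = *slot;
	} while (cmpxchg_local(slot, old, newval) != old);

	return old;	/* previous value, i.e. xchg semantics */
}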
@@ -300,6 +386,12 @@ do { \
 #define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+/*
+ * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
+ * faster than an xchg with forced lock semantics.
+ */
+#define __this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
@@ -319,6 +411,11 @@ do { \
 #define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
@@ -332,15 +429,32 @@ do { \
 #define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
 #define this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
 #define this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
 #define this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
-#endif
+#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#endif /* !CONFIG_M386 */
+
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -352,6 +466,7 @@ do { \
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 
 #define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
@@ -359,14 +474,12 @@ do { \
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
-
-#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
-#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
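Taken together, the x86 changes give the kernel per-CPU xchg, cmpxchg and add_return built on unlocked instructions. A hypothetical usage sketch follows; demo_state, demo_update() and the values are invented, while the this_cpu_xchg()/this_cpu_cmpxchg() operations themselves are the interface introduced by this merge:

#include <linux/kernel.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_state);

static void demo_update(unsigned long newval)
{
	unsigned long old;

	/* swap in the new value; safe against preemption without an
	 * explicit preempt_disable()/preempt_enable() pair */
	old = this_cpu_xchg(demo_state, newval);

	/* install newval only if this CPU's slot is still empty */
	if (this_cpu_cmpxchg(demo_state, 0, newval) != 0)
		pr_debug("slot already claimed, old=%lu\n", old);
}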
include/linux/percpu.h  (+163, −31)
@@ -417,6 +417,89 @@ do { \
 # define this_cpu_xor(pcp, val)	__pcpu_size_call(this_cpu_or_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)				\
+({									\
+	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	__this_cpu_add(pcp, val);					\
+	ret__ = __this_cpu_read(pcp);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
+#define _this_cpu_generic_xchg(pcp, nval)				\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = __this_cpu_read(pcp);					\
+	__this_cpu_write(pcp, nval);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+# define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+# define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+# define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+# define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval)	\
+	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+# define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+# define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+# define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+# define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval)	\
+	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
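The generic fallbacks above all follow one pattern: disable preemption so the task cannot migrate or be interleaved on this CPU, do a plain read-modify-write, re-enable preemption. A function-style sketch of what _this_cpu_generic_add_return() boils down to; the demo_* names are invented, and the kernel keeps it as a macro so it works for any scalar size:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(long, demo_val);

static long demo_add_return(long val)
{
	long ret;

	preempt_disable();		/* stay on this CPU for the RMW */
	__this_cpu_add(demo_val, val);	/* plain, non-atomic update */
	ret = __this_cpu_read(demo_val);
	preempt_enable();
	return ret;
}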
@@ -544,36 +627,6 @@ do { \
 # define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
-#define _this_cpu_generic_add_return(pcp, val)				\
-({									\
-	typeof(pcp) ret__;						\
-	preempt_disable();						\
-	__this_cpu_add(pcp, val);					\
-	ret__ = __this_cpu_read(pcp);					\
-	preempt_enable();						\
-	ret__;								\
-})
-
-#ifndef this_cpu_add_return
-# ifndef this_cpu_add_return_1
-# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_2
-# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_4
-# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_8
-# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
-#endif
-
-#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
-#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
-#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
-
 #define __this_cpu_generic_add_return(pcp, val)				\
 ({									\
 	__this_cpu_add(pcp, val);					\
@@ -600,11 +653,61 @@ do { \
 #define __this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
 #define __this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
 
+#define __this_cpu_generic_xchg(pcp, nval)				\
+({	typeof(pcp) ret__;						\
+	ret__ = __this_cpu_read(pcp);					\
+	__this_cpu_write(pcp, nval);					\
+	ret__;								\
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+# define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+# define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+# define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+# define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval)	\
+	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) ret__;						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	ret__;								\
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+# define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+# define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+# define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+# define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval)	\
+	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
  * processors (which one gets when using regular atomic operations)
- . They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
  * preemption only.
  */
 
 #define irqsafe_cpu_generic_to_op(pcp, val, op)				\
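After this hunk the header carries three parallel families that differ only in what they guard against: __this_cpu_* assumes the caller already prevents reentrancy, this_cpu_* is safe against preemption, and irqsafe_cpu_* is additionally safe against the local interrupt handler. A hypothetical sketch of when each form fits; demo_events and demo_count_event() are invented names:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned int, demo_events);

static void demo_count_event(void)
{
	/* ordinary task context, may be preempted: this_cpu_* is enough */
	this_cpu_inc(demo_events);

	/* counter also updated from the local IRQ handler: irqsafe_cpu_* */
	irqsafe_cpu_add(demo_events, 2);

	/* caller already holds off preemption: the cheap __this_cpu_* form */
	preempt_disable();
	__this_cpu_inc(demo_events);
	preempt_enable();
}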
@@ -691,4 +794,33 @@ do { \
 # define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (val))
 #endif
 
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) ret__;						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	local_irq_restore(flags);					\
+	ret__;								\
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
+	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
 #endif /* __LINUX_PERCPU_H */
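A hypothetical usage sketch of the new fallback: claim a per-CPU slot so that task context and the local interrupt handler cannot both win. The struct, variable and function names are invented; only irqsafe_cpu_cmpxchg() comes from the header above.

#include <linux/percpu.h>
#include <linux/types.h>

struct demo_item { int id; };

static DEFINE_PER_CPU(struct demo_item *, demo_pending);

static bool demo_try_claim(struct demo_item *item)
{
	/* install item only if nothing is pending on this CPU; the compare
	 * and the write happen with local interrupts off (or as a single
	 * unlocked cmpxchg on x86), so an IRQ cannot slip in between */
	return irqsafe_cpu_cmpxchg(demo_pending, NULL, item) == NULL;
}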