1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #ifndef _SCHED_H_
29 #define _SCHED_H_
30 
31 #include <bits/timespec.h>
32 #include <linux/sched.h>
33 #include <sys/cdefs.h>
34 
35 __BEGIN_DECLS
36 
/* This name is used by glibc, but not by the kernel. */
#define SCHED_OTHER SCHED_NORMAL

/* Scheduling parameters, as passed to sched_setscheduler()/sched_setparam()
 * and returned by sched_getparam(). */
struct sched_param {
  int sched_priority;  /* Valid range is given by sched_get_priority_min()/_max() for the policy. */
};

/* Sets the scheduling policy and parameters of the given process (0 means the caller). */
int sched_setscheduler(pid_t, int, const struct sched_param*);
/* Returns the current scheduling policy of the given process (0 means the caller). */
int sched_getscheduler(pid_t);
/* Yields the processor, moving the caller to the end of its run queue. */
int sched_yield(void);
/* Returns the maximum priority value usable with the given policy. */
int sched_get_priority_max(int);
/* Returns the minimum priority value usable with the given policy. */
int sched_get_priority_min(int);
/* Sets just the scheduling parameters (not the policy) of the given process. */
int sched_setparam(pid_t, const struct sched_param*);
/* Retrieves the scheduling parameters of the given process. */
int sched_getparam(pid_t, struct sched_param*);
/* Writes the SCHED_RR time-slice of the given process into the timespec. */
int sched_rr_get_interval(pid_t, struct timespec*);
52 
#if defined(__USE_GNU)

/* Creates a child that shares resources with the caller according to the
 * CLONE_* flags, running fn(arg) on the supplied child stack. Availability
 * differs per ABI, hence the per-architecture __INTRODUCED_IN_* annotations. */
int clone(int (*)(void*), void*, int, void*, ...) __INTRODUCED_IN_ARM(9)
    __INTRODUCED_IN_MIPS(12) __INTRODUCED_IN_X86(17);
/* Disassociates parts of the caller's execution context (CLONE_* flags). */
int unshare(int) __INTRODUCED_IN(17);
/* Returns the number of the CPU the calling thread is currently running on. */
int sched_getcpu(void) __INTRODUCED_IN(12);
/* Reassociates the caller with the namespace referred to by the given fd. */
int setns(int, int) __INTRODUCED_IN(21);

/* Number of CPUs representable by the statically-sized cpu_set_t below. */
#ifdef __LP64__
#define CPU_SETSIZE 1024
#else
#define CPU_SETSIZE 32
#endif
66 
#define __CPU_BITTYPE  unsigned long int  /* mandated by the kernel  */
/* Number of bits held by one array element. */
#define __CPU_BITS     (8 * sizeof(__CPU_BITTYPE))
/* Index of the array element that holds bit x. */
#define __CPU_ELT(x)   ((x) / __CPU_BITS)
/* Single-bit mask for bit x within its element. */
#define __CPU_MASK(x)  ((__CPU_BITTYPE)1 << ((x) & (__CPU_BITS - 1)))

/* Fixed-size CPU bitmask: one bit per CPU, CPU_SETSIZE bits in total. */
typedef struct {
  __CPU_BITTYPE  __bits[ CPU_SETSIZE / __CPU_BITS ];
} cpu_set_t;
75 
/* Restricts the given process (0 means the caller) to the CPUs in `set`. */
int sched_setaffinity(pid_t pid, size_t setsize, const cpu_set_t* set) __INTRODUCED_IN(12);
/* Writes the current affinity mask of the given process into `set`. */
int sched_getaffinity(pid_t pid, size_t setsize, cpu_set_t* set) __INTRODUCED_IN(12);

/* Convenience wrappers around the sized _S variants, operating on a whole
 * statically-sized cpu_set_t. */
#define CPU_ZERO(set)          CPU_ZERO_S(sizeof(cpu_set_t), set)
#define CPU_SET(cpu, set)      CPU_SET_S(cpu, sizeof(cpu_set_t), set)
#define CPU_CLR(cpu, set)      CPU_CLR_S(cpu, sizeof(cpu_set_t), set)
#define CPU_ISSET(cpu, set)    CPU_ISSET_S(cpu, sizeof(cpu_set_t), set)
#define CPU_COUNT(set)         CPU_COUNT_S(sizeof(cpu_set_t), set)
#define CPU_EQUAL(set1, set2)  CPU_EQUAL_S(sizeof(cpu_set_t), set1, set2)

/* Bulk bitwise operations over whole statically-sized sets. */
#define CPU_AND(dst, set1, set2)  __CPU_OP(dst, set1, set2, &)
#define CPU_OR(dst, set1, set2)   __CPU_OP(dst, set1, set2, |)
#define CPU_XOR(dst, set1, set2)  __CPU_OP(dst, set1, set2, ^)

#define __CPU_OP(dst, set1, set2, op)  __CPU_OP_S(sizeof(cpu_set_t), dst, set1, set2, op)
91 
92 /* Support for dynamically-allocated cpu_set_t */
93 
94 #define CPU_ALLOC_SIZE(count) \
95   __CPU_ELT((count) + (__CPU_BITS - 1)) * sizeof(__CPU_BITTYPE)
96 
97 #define CPU_ALLOC(count)  __sched_cpualloc((count))
98 #define CPU_FREE(set)     __sched_cpufree((set))
99 
100 cpu_set_t* __sched_cpualloc(size_t count) __INTRODUCED_IN(12);
101 void __sched_cpufree(cpu_set_t* set) __INTRODUCED_IN(12);
102 
/* Clears all bits in the first `setsize` bytes of `set`. */
#define CPU_ZERO_S(setsize, set)  __builtin_memset(set, 0, setsize)

/* Sets bit `cpu` in `set`; cpus beyond the set's capacity are silently
 * ignored. `cpu` is evaluated exactly once. */
#define CPU_SET_S(cpu, setsize, set) \
  do { \
    size_t __cpu = (cpu); \
    if (__cpu < 8 * (setsize)) \
      (set)->__bits[__CPU_ELT(__cpu)] |= __CPU_MASK(__cpu); \
  } while (0)

/* Clears bit `cpu` in `set`; out-of-range cpus are silently ignored. */
#define CPU_CLR_S(cpu, setsize, set) \
  do { \
    size_t __cpu = (cpu); \
    if (__cpu < 8 * (setsize)) \
      (set)->__bits[__CPU_ELT(__cpu)] &= ~__CPU_MASK(__cpu); \
  } while (0)

/* Evaluates to 1 if bit `cpu` is set in `set`, 0 otherwise (including when
 * `cpu` is out of range). Uses a GNU statement expression so `cpu` is
 * evaluated exactly once. */
#define CPU_ISSET_S(cpu, setsize, set) \
  (__extension__ ({ \
    size_t __cpu = (cpu); \
    (__cpu < 8 * (setsize)) \
      ? ((set)->__bits[__CPU_ELT(__cpu)] & __CPU_MASK(__cpu)) != 0 \
      : 0; \
  }))

/* Evaluates to non-zero if the first `setsize` bytes of the two sets match. */
#define CPU_EQUAL_S(setsize, set1, set2)  (__builtin_memcmp(set1, set2, setsize) == 0)
128 
/* Sized bulk bitwise operations: dst = set1 op set2 over `setsize` bytes. */
#define CPU_AND_S(setsize, dst, set1, set2)  __CPU_OP_S(setsize, dst, set1, set2, &)
#define CPU_OR_S(setsize, dst, set1, set2)   __CPU_OP_S(setsize, dst, set1, set2, |)
#define CPU_XOR_S(setsize, dst, set1, set2)  __CPU_OP_S(setsize, dst, set1, set2, ^)

/* Applies `op` element-wise across the first `setsize` bytes of the two
 * source sets, storing into `dstset`. Each set argument and `setsize` are
 * evaluated exactly once; `dstset` may alias either source. */
#define __CPU_OP_S(setsize, dstset, srcset1, srcset2, op) \
  do { \
    cpu_set_t* __dst = (dstset); \
    const __CPU_BITTYPE* __src1 = (srcset1)->__bits; \
    const __CPU_BITTYPE* __src2 = (srcset2)->__bits; \
    size_t __nn = 0, __nn_max = (setsize)/sizeof(__CPU_BITTYPE); \
    for (; __nn < __nn_max; __nn++) \
      (__dst)->__bits[__nn] = __src1[__nn] op __src2[__nn]; \
  } while (0)
142 
143 #define CPU_COUNT_S(setsize, set)  __sched_cpucount((setsize), (set))
144 
145 int __sched_cpucount(size_t setsize, cpu_set_t* set) __INTRODUCED_IN(12);
146 
147 #endif /* __USE_GNU */
148 
149 __END_DECLS
150 
151 #endif /* _SCHED_H_ */
152