#ifndef KCT_H_
#  define KCT_H_

#  include <linux/netlink.h>

/*
 * Warning: structures and constants in this header must match the
 * ones in libc/kernel/common/linux/kct.h, so that information can
 * be exchanged between kernel and userspace through the netlink socket.
 */
/* flags to optionally filter events on android property activation */
#define	EV_FLAGS_PRIORITY_LOW	(1<<0)

#  ifndef MAX_SB_N
#    define MAX_SB_N 32
#  endif

#  ifndef MAX_EV_N
#    define MAX_EV_N 32
#  endif

#  define NETLINK_CRASHTOOL 27
#  define ATTCHMT_ALIGN 4U

/* Type of events supported by crashtool */
enum ct_ev_type {
	CT_EV_STAT,
	CT_EV_INFO,
	CT_EV_ERROR,
	CT_EV_CRASH,
	CT_EV_LAST
};

enum ct_attchmt_type {
	CT_ATTCHMT_DATA0,
	CT_ATTCHMT_DATA1,
	CT_ATTCHMT_DATA2,
	CT_ATTCHMT_DATA3,
	CT_ATTCHMT_DATA4,
	CT_ATTCHMT_DATA5,
	/* Always add new types after DATA5 */
	CT_ATTCHMT_BINARY,
	CT_ATTCHMT_FILELIST
};

struct ct_attchmt {
	__u32 size; /* sizeof(data) */
	enum ct_attchmt_type type;
	char data[];
} __aligned(4);

struct ct_event {
	__u64 timestamp;
	char submitter_name[MAX_SB_N];
	char ev_name[MAX_EV_N];
	enum ct_ev_type type;
	__u32 attchmt_size; /* total size of all attachments, including padding */
	__u32 flags;
	struct ct_attchmt attachments[];
} __aligned(4);

enum kct_nlmsg_type {
	/* kernel -> userland */
	KCT_EVENT,
	/* userland -> kernel */
	KCT_SET_PID = 4200,
};

struct kct_packet {
	struct nlmsghdr nlh;
	struct ct_event event;
};
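
/*
 * Illustrative sketch (not part of this header's API): how a userspace
 * consumer of a NETLINK_CRASHTOOL socket might pull one KCT_EVENT out of
 * a received buffer.  The variable names (fd, buf) are made up and error
 * handling is omitted.
 *
 *	char buf[4096];
 *	ssize_t len = recv(fd, buf, sizeof(buf), 0);
 *	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
 *
 *	if (len > 0 && NLMSG_OK(nlh, (unsigned int)len) &&
 *	    nlh->nlmsg_type == KCT_EVENT) {
 *		struct ct_event *ev = NLMSG_DATA(nlh);
 *		printf("%s/%s type=%d\n", ev->submitter_name,
 *		       ev->ev_name, ev->type);
 *	}
 */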

#  define ATTCHMT_ALIGNMENT	4

#  ifndef KCT_ALIGN
#    define __KCT_ALIGN_MASK(x, mask)    (((x) + (mask)) & ~(mask))
#    define __KCT_ALIGN(x, a)            __KCT_ALIGN_MASK(x, (typeof(x))(a) - 1)
#    define KCT_ALIGN(x, a)		     __KCT_ALIGN((x), (a))
#  endif /* !KCT_ALIGN */
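
/*
 * KCT_ALIGN(x, a) rounds x up to the next multiple of a (a power of two),
 * like the kernel's ALIGN() macro.  For example, with a == 4:
 *	KCT_ALIGN(5, 4) == 8, KCT_ALIGN(8, 4) == 8, KCT_ALIGN(9, 4) == 12
 * foreach_attchmt() below relies on this to step over the padding that
 * keeps every attachment ATTCHMT_ALIGNMENT-byte aligned.
 */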

#  define foreach_attchmt(Event, Attchmt)				\
	if ((Event)->attchmt_size)					\
		for ((Attchmt) = (Event)->attachments;			\
		     (Attchmt) < (typeof(Attchmt))(((char *)		\
				  (Event)->attachments) +		\
				  (Event)->attchmt_size);		\
		     (Attchmt) = (typeof(Attchmt))KCT_ALIGN(((size_t)(Attchmt)) \
						  + sizeof(*(Attchmt)) +	\
						  (Attchmt)->size, ATTCHMT_ALIGNMENT))
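
/*
 * Illustrative sketch: walking the attachments of an event with
 * foreach_attchmt().  "ev" and "att" are hypothetical local variables;
 * DATA attachments are NUL-terminated strings when added through the
 * kct_log() macros below.
 *
 *	struct ct_attchmt *att = NULL;
 *
 *	foreach_attchmt(ev, att) {
 *		switch (att->type) {
 *		case CT_ATTCHMT_DATA0:
 *			pr_info("data0: %s\n", att->data);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */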

/*
 * Users should use the macros below rather than calling these extern
 * functions directly.  The latter are declared here only so they can be
 * marked __weak, which keeps the macros working: the "if (kct_alloc_event)"
 * tests rely on the weak symbols resolving to NULL when the crashtool
 * implementation is absent.
 */
/* Raw API (deprecated) */
extern struct ct_event *kct_alloc_event(const char *submitter_name,
					const char *ev_name,
					enum ct_ev_type ev_type,
					gfp_t flags, uint eflags) __weak;
extern int kct_add_attchmt(struct ct_event **ev,
			   enum ct_attchmt_type at_type,
			   unsigned int size,
			   char *data, gfp_t flags) __weak;
extern void kct_free_event(struct ct_event *ev) __weak;
extern int kct_log_event(struct ct_event *ev, gfp_t flags) __weak;

/* API */
#define MKFN(fn, ...) MKFN_N(fn, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)(__VA_ARGS__)
#define MKFN_N(fn, n0, n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n, ...) fn##n
#define kct_log(...) MKFN(__kct_log_, ##__VA_ARGS__)
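
/*
 * kct_log() dispatches on its argument count: 4 arguments log a bare
 * event, 5 to 10 arguments add CT_ATTCHMT_DATA0..DATA5 string
 * attachments, and an 11th argument is attached as CT_ATTCHMT_FILELIST.
 * Illustrative sketch ("mydrv", the event names and the message buffer
 * are made-up examples):
 *
 *	char msg[] = "fw 1.2.3";
 *
 *	kct_log(CT_EV_ERROR, "mydrv", "probe_failed", EV_FLAGS_PRIORITY_LOW);
 *	kct_log(CT_EV_INFO, "mydrv", "fw_loaded", 0, msg);
 */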

#define __kct_log_4(Type, Submitter_name, Ev_name, flags) \
	do {  if (kct_alloc_event) {	\
		struct ct_event *__ev =	\
			kct_alloc_event(Submitter_name, Ev_name, Type, \
				GFP_ATOMIC, flags); \
		if (__ev) { \
			kct_log_event(__ev, GFP_ATOMIC); \
		} \
	} } while (0)

#define __kct_log_5(Type, Submitter_name, Ev_name, flags, Data0) \
	do {  if (kct_alloc_event) {	\
		struct ct_event *__ev =	\
			kct_alloc_event(Submitter_name, Ev_name, Type, \
				GFP_ATOMIC, flags); \
		if (__ev) { \
			if (Data0) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
			kct_log_event(__ev, GFP_ATOMIC); \
		} \
	} } while (0)

#define __kct_log_6(Type, Submitter_name, Ev_name, flags, Data0, Data1) \
	do {  if (kct_alloc_event) {	\
		struct ct_event *__ev =	\
			kct_alloc_event(Submitter_name, Ev_name, Type, \
				GFP_ATOMIC, flags); \
		if (__ev) { \
			if (Data0) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
			if (Data1) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
			kct_log_event(__ev, GFP_ATOMIC); \
		} \
	} } while (0)

#define __kct_log_7(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2) \
	do {  if (kct_alloc_event) {	\
		struct ct_event *__ev =	\
			kct_alloc_event(Submitter_name, Ev_name, Type, \
				GFP_ATOMIC, flags); \
		if (__ev) { \
			if (Data0) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
			if (Data1) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
			if (Data2) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
			kct_log_event(__ev, GFP_ATOMIC); \
		} \
	} } while (0)

#define __kct_log_8(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2, \
					Data3) \
	do {  if (kct_alloc_event) {	\
		struct ct_event *__ev =	\
			kct_alloc_event(Submitter_name, Ev_name, Type, \
				GFP_ATOMIC, flags); \
		if (__ev) { \
			if (Data0) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
			if (Data1) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
			if (Data2) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
			if (Data3) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA3, \
					strlen(Data3) + 1, Data3, GFP_ATOMIC); \
			kct_log_event(__ev, GFP_ATOMIC); \
		} \
	} } while (0)

#define __kct_log_9(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2, \
					Data3, Data4) \
	do {  if (kct_alloc_event) {	\
		struct ct_event *__ev =	\
			kct_alloc_event(Submitter_name, Ev_name, Type, \
				GFP_ATOMIC, flags); \
		if (__ev) { \
			if (Data0) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
			if (Data1) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
			if (Data2) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
			if (Data3) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA3, \
					strlen(Data3) + 1, Data3, GFP_ATOMIC); \
			if (Data4) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA4, \
					strlen(Data4) + 1, Data4, GFP_ATOMIC); \
			kct_log_event(__ev, GFP_ATOMIC); \
		} \
	} } while (0)

#define __kct_log_10(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2, \
					Data3, Data4, Data5) \
	do {  if (kct_alloc_event) {	\
		struct ct_event *__ev =	\
			kct_alloc_event(Submitter_name, Ev_name, Type, \
				GFP_ATOMIC, flags); \
		if (__ev) { \
			if (Data0) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
			if (Data1) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
			if (Data2) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
			if (Data3) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA3, \
					strlen(Data3) + 1, Data3, GFP_ATOMIC); \
			if (Data4) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA4, \
					strlen(Data4) + 1, Data4, GFP_ATOMIC); \
			if (Data5) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA5, \
					strlen(Data5) + 1, Data5, GFP_ATOMIC); \
			kct_log_event(__ev, GFP_ATOMIC); \
		} \
	} } while (0)

#define __kct_log_11(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2, \
					Data3, Data4, Data5, filelist) \
	do {  if (kct_alloc_event) {	\
		struct ct_event *__ev =	\
			kct_alloc_event(Submitter_name, Ev_name, Type, \
				GFP_ATOMIC, flags); \
		if (__ev) { \
			if (Data0) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
			if (Data1) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
			if (Data2) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
			if (Data3) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA3, \
					strlen(Data3) + 1, Data3, GFP_ATOMIC); \
			if (Data4) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA4, \
					strlen(Data4) + 1, Data4, GFP_ATOMIC); \
			if (Data5) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA5, \
					strlen(Data5) + 1, Data5, GFP_ATOMIC); \
			if (filelist) \
				kct_add_attchmt(&__ev, CT_ATTCHMT_FILELIST, \
					strlen(filelist) + 1, filelist, GFP_ATOMIC); \
			kct_log_event(__ev, GFP_ATOMIC); \
		} \
	} } while (0)

#endif /* !KCT_H_ */