/*
** Copyright 2014, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#include <ctype.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
#include <sys/_system_properties.h>

#include <android/log.h>

#include "log_portability.h"

static pthread_mutex_t lock_loggable = PTHREAD_MUTEX_INITIALIZER;

static int lock()
{
    /*
     * If we trigger a signal handler in the middle of locked activity and the
     * signal handler logs a message, we could get into a deadlock state.
     */
    /*
     * Under any contention we can fall back to the uncached method in less
     * time than a blocking mutex would spend in the system call that resolves
     * the contention.
     */
    return pthread_mutex_trylock(&lock_loggable);
}

static void unlock()
{
    pthread_mutex_unlock(&lock_loggable);
}

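/*
 * Cached view of a single system property: the property handle, the serial
 * number observed at the last read, and the first character of the value
 * (or one of the BOOLEAN_* codes defined below).  check_cache() reports
 * whether the property has changed since it was last read into the cache.
 */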
struct cache {
    const prop_info *pinfo;
    uint32_t serial;
    unsigned char c;
};

static int check_cache(struct cache *cache)
{
    return cache->pinfo
        && __system_property_serial(cache->pinfo) != cache->serial;
}

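/*
 * Out-of-band codes for cached boolean values, chosen outside the printable
 * ASCII range so they cannot be mistaken for the first character of an
 * ordinary property value.
 */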
#define BOOLEAN_TRUE 0xFF
#define BOOLEAN_FALSE 0xFE

static void refresh_cache(struct cache *cache, const char *key)
{
    char buf[PROP_VALUE_MAX];

    if (!cache->pinfo) {
        cache->pinfo = __system_property_find(key);
        if (!cache->pinfo) {
            return;
        }
    }
    cache->serial = __system_property_serial(cache->pinfo);
    __system_property_read(cache->pinfo, 0, buf);
    switch(buf[0]) {
    case 't': case 'T':
        cache->c = strcasecmp(buf + 1, "rue") ? buf[0] : BOOLEAN_TRUE;
        break;
    case 'f': case 'F':
        cache->c = strcasecmp(buf + 1, "alse") ? buf[0] : BOOLEAN_FALSE;
        break;
    default:
        cache->c = buf[0];
    }
}
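
/*
 * For example, a property value of "true" or "TRUE" caches as BOOLEAN_TRUE
 * and "false"/"FALSE" as BOOLEAN_FALSE, while any other value (such as "V"
 * or "Verbose") caches only its first character.
 */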

static int __android_log_level(const char *tag, int default_prio)
{
    /* sizeof() is used on this array below */
    static const char log_namespace[] = "persist.log.tag.";
    static const size_t base_offset = 8; /* skip "persist." */
    /* calculate the size of our key temporary buffer */
    const size_t taglen = (tag && *tag) ? strlen(tag) : 0;
    /* sizeof(log_namespace) = strlen(log_namespace) + 1 */
    char key[sizeof(log_namespace) + taglen]; /* may be > PROPERTY_KEY_MAX */
    char *kp;
    size_t i;
    char c = 0;
    /*
     * Single layer cache of four properties. Priorities are:
     *    log.tag.<tag>
     *    persist.log.tag.<tag>
     *    log.tag
     *    persist.log.tag
     * Where the missing tag matches all tags and becomes the
     * system global default. We do not support ro.log.tag* .
     */
    static char last_tag[PROP_NAME_MAX];
    static uint32_t global_serial;
    /* some compilers erroneously see uninitialized use. !not_locked */
    uint32_t current_global_serial = 0;
    static struct cache tag_cache[2];
    static struct cache global_cache[2];
    int change_detected;
    int global_change_detected;
    int not_locked;

    strcpy(key, log_namespace);

    global_change_detected = change_detected = not_locked = lock();

    if (!not_locked) {
        /*
         * check all known serial numbers for changes.
         */
        for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
            if (check_cache(&tag_cache[i])) {
                change_detected = 1;
            }
        }
        for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
            if (check_cache(&global_cache[i])) {
                global_change_detected = 1;
            }
        }

        current_global_serial = __system_property_area_serial();
        if (current_global_serial != global_serial) {
            change_detected = 1;
            global_change_detected = 1;
        }
    }

    if (taglen) {
        int local_change_detected = change_detected;
        if (!not_locked) {
            if (!last_tag[0]
                    || (last_tag[0] != tag[0])
                    || strncmp(last_tag + 1, tag + 1, sizeof(last_tag) - 1)) {
                /* invalidate log.tag.<tag> cache */
                for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
                    tag_cache[i].pinfo = NULL;
                    tag_cache[i].c = '\0';
                }
                last_tag[0] = '\0';
                local_change_detected = 1;
            }
            if (!last_tag[0]) {
                strncpy(last_tag, tag, sizeof(last_tag));
            }
        }
        strcpy(key + sizeof(log_namespace) - 1, tag);

        kp = key;
        for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
            struct cache *cache = &tag_cache[i];
            struct cache temp_cache;

            if (not_locked) {
                temp_cache.pinfo = NULL;
                temp_cache.c = '\0';
                cache = &temp_cache;
            }
            if (local_change_detected) {
                refresh_cache(cache, kp);
            }

            if (cache->c) {
                c = cache->c;
                break;
            }

            kp = key + base_offset;
        }
    }

    switch (toupper(c)) { /* if invalid, resort to global */
    case 'V':
    case 'D':
    case 'I':
    case 'W':
    case 'E':
    case 'F': /* Not officially supported */
    case 'A':
    case 'S':
    case BOOLEAN_FALSE: /* Not officially supported */
        break;
    default:
        /* clear '.' after log.tag */
        key[sizeof(log_namespace) - 2] = '\0';

        kp = key;
        for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
            struct cache *cache = &global_cache[i];
            struct cache temp_cache;

            if (not_locked) {
                temp_cache = *cache;
                if (temp_cache.pinfo != cache->pinfo) { /* check atomic */
                    temp_cache.pinfo = NULL;
                    temp_cache.c = '\0';
                }
                cache = &temp_cache;
            }
            if (global_change_detected) {
                refresh_cache(cache, kp);
            }

            if (cache->c) {
                c = cache->c;
                break;
            }

            kp = key + base_offset;
        }
        break;
    }

    if (!not_locked) {
        global_serial = current_global_serial;
        unlock();
    }

    switch (toupper(c)) {
    case 'V': return ANDROID_LOG_VERBOSE;
    case 'D': return ANDROID_LOG_DEBUG;
    case 'I': return ANDROID_LOG_INFO;
    case 'W': return ANDROID_LOG_WARN;
    case 'E': return ANDROID_LOG_ERROR;
    case 'F': /* FALLTHRU */ /* Not officially supported */
    case 'A': return ANDROID_LOG_FATAL;
    case BOOLEAN_FALSE: /* FALLTHRU */ /* Not officially supported */
    case 'S': return -1; /* ANDROID_LOG_SUPPRESS */
    }
    return default_prio;
}

LIBLOG_ABI_PUBLIC int __android_log_is_loggable(int prio, const char *tag,
                                                int default_prio)
{
    int logLevel = __android_log_level(tag, default_prio);
    return logLevel >= 0 && prio >= logLevel;
}
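
/*
 * Example (hypothetical caller; the tag "MyApp" and the message are
 * illustrative): gate expensive log formatting on the configured level,
 *
 *   if (__android_log_is_loggable(ANDROID_LOG_VERBOSE, "MyApp", ANDROID_LOG_INFO)) {
 *       __android_log_print(ANDROID_LOG_VERBOSE, "MyApp", "state: %d", state);
 *   }
 *
 * and enable verbose output for that tag at runtime with, for instance,
 * "adb shell setprop log.tag.MyApp V".
 */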

LIBLOG_HIDDEN int __android_log_is_debuggable()
{
    static uint32_t serial;
    static struct cache tag_cache;
    static const char key[] = "ro.debuggable";
    int ret;

    if (tag_cache.c) { /* ro property does not change after set */
        ret = tag_cache.c == '1';
    } else if (lock()) {
        struct cache temp_cache = { NULL, -1, '\0' };
        refresh_cache(&temp_cache, key);
        ret = temp_cache.c == '1';
    } else {
        int change_detected = check_cache(&tag_cache);
        uint32_t current_serial = __system_property_area_serial();
        if (current_serial != serial) {
            change_detected = 1;
        }
        if (change_detected) {
            refresh_cache(&tag_cache, key);
            serial = current_serial;
        }
        ret = tag_cache.c == '1';

        unlock();
    }

    return ret;
}
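
/*
 * Example (hypothetical caller): emit extra diagnostics only when
 * ro.debuggable is "1", as on eng and userdebug builds,
 *
 *   if (__android_log_is_debuggable()) {
 *       __android_log_print(ANDROID_LOG_DEBUG, "MyApp", "internal state: %d", state);
 *   }
 */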

/*
 * For properties that are read often, but generally remain constant.
 * Since a change is rare, we will accept a trylock failure gracefully.
 * Use a separate lock from is_loggable to keep contention down b/25563384.
 */
struct cache2 {
    pthread_mutex_t lock;
    uint32_t serial;
    const char *key_persist;
    struct cache cache_persist;
    const char *key_ro;
    struct cache cache_ro;
    unsigned char (*const evaluate)(const struct cache2 *self);
};

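/*
 * Refresh both cached properties under the per-instance lock whenever any
 * serial number indicates a change, then apply the instance's evaluate
 * callback.  On lock contention the existing (possibly stale) cached values
 * are evaluated as-is.
 */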
static inline unsigned char do_cache2(struct cache2 *self)
{
    uint32_t current_serial;
    int change_detected;
    unsigned char c;

    if (pthread_mutex_trylock(&self->lock)) {
        /* We are willing to accept some race in this context */
        return self->evaluate(self);
    }

    change_detected = check_cache(&self->cache_persist)
                   || check_cache(&self->cache_ro);
    current_serial = __system_property_area_serial();
    if (current_serial != self->serial) {
        change_detected = 1;
    }
    if (change_detected) {
        refresh_cache(&self->cache_persist, self->key_persist);
        refresh_cache(&self->cache_ro, self->key_ro);
        self->serial = current_serial;
    }
    c = self->evaluate(self);

    pthread_mutex_unlock(&self->lock);

    return c;
}

static unsigned char evaluate_persist_ro(const struct cache2 *self)
{
    unsigned char c = self->cache_persist.c;

    if (c) {
        return c;
    }

    return self->cache_ro.c;
}

/*
 * Timestamp state generally remains constant, but can change at any time
 * to handle developer requirements.
 */
LIBLOG_ABI_PUBLIC clockid_t android_log_clockid()
{
    static struct cache2 clockid = {
        PTHREAD_MUTEX_INITIALIZER,
        0,
        "persist.logd.timestamp",
        { NULL, -1, '\0' },
        "ro.logd.timestamp",
        { NULL, -1, '\0' },
        evaluate_persist_ro
    };

    return (tolower(do_cache2(&clockid)) == 'm')
        ? CLOCK_MONOTONIC
        : CLOCK_REALTIME;
}
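
/*
 * Example (hypothetical caller): when persist.logd.timestamp (or the
 * ro.logd.timestamp fallback) begins with 'm' or 'M', timestamps should be
 * taken on the monotonic clock; a writer can stay consistent with
 *
 *   struct timespec ts;
 *   clock_gettime(android_log_clockid(), &ts);
 */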

/*
 * Security state generally remains constant, but the device owner (DO) must
 * be able to turn off logging should it become spammy after an attack is
 * detected.
 */
static unsigned char evaluate_security(const struct cache2 *self)
{
    unsigned char c = self->cache_ro.c;

    return (c != BOOLEAN_FALSE) && c && (self->cache_persist.c == BOOLEAN_TRUE);
}

LIBLOG_ABI_PUBLIC int __android_log_security()
{
    static struct cache2 security = {
        PTHREAD_MUTEX_INITIALIZER,
        0,
        "persist.logd.security",
        { NULL, -1, BOOLEAN_FALSE },
        "ro.device_owner",
        { NULL, -1, BOOLEAN_FALSE },
        evaluate_security
    };

    return do_cache2(&security);
}
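
/*
 * Example (hypothetical caller): the result is non-zero only when
 * ro.device_owner is set and not "false" and persist.logd.security is "true";
 * a writer might gate security events on it,
 *
 *   if (__android_log_security()) {
 *       // emit the event to the security log buffer
 *   }
 */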