/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "system_properties/prop_area.h"

#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/xattr.h>
#include <unistd.h>

#include <new>

#include <async_safe/log.h>

#ifdef LARGE_SYSTEM_PROPERTY_NODE
constexpr size_t PA_SIZE = 1024 * 1024;
#else
constexpr size_t PA_SIZE = 128 * 1024;
#endif
constexpr uint32_t PROP_AREA_MAGIC = 0x504f5250;
constexpr uint32_t PROP_AREA_VERSION = 0xfc6ed0ab;

size_t prop_area::pa_size_ = 0;
size_t prop_area::pa_data_size_ = 0;

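// Creates and maps a fresh read-write property area. The file is created with O_EXCL so an
// existing area is never silently reused, optionally labeled with the given SELinux context,
// truncated to PA_SIZE, and mapped shared so other processes can later map it read-only.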
prop_area* prop_area::map_prop_area_rw(const char* filename, const char* context,
                                       bool* fsetxattr_failed) {
  /* dev is a tmpfs that we can use to carve a shared workspace
   * out of, so let's do that...
   */
  const int fd = open(filename, O_RDWR | O_CREAT | O_NOFOLLOW | O_CLOEXEC | O_EXCL, 0444);

  if (fd < 0) {
    if (errno == EACCES) {
      /* for consistency with the case where the process has already
       * mapped the page in and segfaults when trying to write to it
       */
      abort();
    }
    return nullptr;
  }

  if (context) {
    if (fsetxattr(fd, XATTR_NAME_SELINUX, context, strlen(context) + 1, 0) != 0) {
      async_safe_format_log(ANDROID_LOG_ERROR, "libc",
                            "fsetxattr failed to set context (%s) for \"%s\"", context, filename);
      /*
       * fsetxattr() will fail during system properties tests due to selinux policy.
       * We do not want to create a custom policy for the tester, so we will continue in
       * this function but set a flag that an error has occurred.
       * Init, which is the only daemon that should ever call this function, will abort
       * when this error occurs.
       * Otherwise, the tester will ignore it and continue, albeit without any selinux
       * property separation.
       */
      if (fsetxattr_failed) {
        *fsetxattr_failed = true;
      }
    }
  }

  if (ftruncate(fd, PA_SIZE) < 0) {
    close(fd);
    return nullptr;
  }

  pa_size_ = PA_SIZE;
  pa_data_size_ = pa_size_ - sizeof(prop_area);

  void* const memory_area = mmap(nullptr, pa_size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (memory_area == MAP_FAILED) {
    close(fd);
    return nullptr;
  }

  prop_area* pa = new (memory_area) prop_area(PROP_AREA_MAGIC, PROP_AREA_VERSION);

  close(fd);
  return pa;
}

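// Maps an existing property area read-only from an already-open fd. The file must be owned by
// root:root, must not be group- or world-writable, and must be at least large enough to hold the
// prop_area header; the mapping is rejected (and unmapped) if the magic or version doesn't match.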
prop_area* prop_area::map_fd_ro(const int fd) {
  struct stat fd_stat;
  if (fstat(fd, &fd_stat) < 0) {
    return nullptr;
  }

  if ((fd_stat.st_uid != 0) || (fd_stat.st_gid != 0) ||
      ((fd_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0) ||
      (fd_stat.st_size < static_cast<off_t>(sizeof(prop_area)))) {
    return nullptr;
  }

  pa_size_ = fd_stat.st_size;
  pa_data_size_ = pa_size_ - sizeof(prop_area);

  void* const map_result = mmap(nullptr, pa_size_, PROT_READ, MAP_SHARED, fd, 0);
  if (map_result == MAP_FAILED) {
    return nullptr;
  }

  prop_area* pa = reinterpret_cast<prop_area*>(map_result);
  if ((pa->magic() != PROP_AREA_MAGIC) || (pa->version() != PROP_AREA_VERSION)) {
    munmap(pa, pa_size_);
    return nullptr;
  }

  return pa;
}

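// Convenience wrapper: opens |filename| read-only and maps it via map_fd_ro().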
prop_area* prop_area::map_prop_area(const char* filename) {
  int fd = open(filename, O_CLOEXEC | O_NOFOLLOW | O_RDONLY);
  if (fd == -1) return nullptr;

  prop_area* map_result = map_fd_ro(fd);
  close(fd);

  return map_result;
}

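// Simple bump allocator over the data_ region: reserves |size| bytes (rounded up to
// uint_least32_t alignment), stores the allocation's offset within data_ in *off, and returns a
// pointer to it, or nullptr if the area is full. Allocations are never freed.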
void* prop_area::allocate_obj(const size_t size, uint_least32_t* const off) {
  const size_t aligned = __BIONIC_ALIGN(size, sizeof(uint_least32_t));
  if (bytes_used_ + aligned > pa_data_size_) {
    return nullptr;
  }

  *off = bytes_used_;
  bytes_used_ += aligned;
  return data_ + *off;
}

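// Allocates a trie node plus space for its name, constructs it in place, and returns its offset
// within data_ through *off.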
prop_trie_node* prop_area::new_prop_trie_node(const char* name, uint32_t namelen,
                                              uint_least32_t* const off) {
  uint_least32_t new_offset;
  void* const p = allocate_obj(sizeof(prop_trie_node) + namelen + 1, &new_offset);
  if (p == nullptr) return nullptr;

  prop_trie_node* node = new (p) prop_trie_node(name, namelen);
  *off = new_offset;
  return node;
}

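// Allocates a prop_info plus space for its name. Values shorter than PROP_VALUE_MAX are stored
// inline in the prop_info; longer values get a separate allocation whose offset is recorded
// relative to the prop_info itself.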
prop_info* prop_area::new_prop_info(const char* name, uint32_t namelen, const char* value,
                                    uint32_t valuelen, uint_least32_t* const off) {
  uint_least32_t new_offset;
  void* const p = allocate_obj(sizeof(prop_info) + namelen + 1, &new_offset);
  if (p == nullptr) return nullptr;

  prop_info* info;
  if (valuelen >= PROP_VALUE_MAX) {
    uint32_t long_value_offset = 0;
    char* long_location = reinterpret_cast<char*>(allocate_obj(valuelen + 1, &long_value_offset));
    if (!long_location) return nullptr;

    memcpy(long_location, value, valuelen);
    long_location[valuelen] = '\0';

    // Both new_offset and long_value_offset are offsets based off of data_, however prop_info
    // does not know what data_ is, so we change this offset to be an offset from the prop_info
    // pointer that contains it.
    long_value_offset -= new_offset;

    info = new (p) prop_info(name, namelen, long_value_offset);
  } else {
    info = new (p) prop_info(name, namelen, value, valuelen);
  }
  *off = new_offset;
  return info;
}

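// Helpers for converting the offsets stored in the mapped area back into pointers. Offsets are
// relative to data_; the loads use memory_order_consume to pair with the release stores made
// when new nodes are linked into the trie.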
void* prop_area::to_prop_obj(uint_least32_t off) {
  if (off > pa_data_size_) return nullptr;

  return (data_ + off);
}

inline prop_trie_node* prop_area::to_prop_trie_node(atomic_uint_least32_t* off_p) {
  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
  return reinterpret_cast<prop_trie_node*>(to_prop_obj(off));
}

inline prop_info* prop_area::to_prop_info(atomic_uint_least32_t* off_p) {
  uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
  return reinterpret_cast<prop_info*>(to_prop_obj(off));
}

inline prop_trie_node* prop_area::root_node() {
  return reinterpret_cast<prop_trie_node*>(to_prop_obj(0));
}

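// Ordering used by the per-level binary search trees: compare by length first, then
// lexicographically, so only equal-length names are compared byte by byte.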
static int cmp_prop_name(const char* one, uint32_t one_len, const char* two, uint32_t two_len) {
  if (one_len < two_len)
    return -1;
  else if (one_len > two_len)
    return 1;
  else
    return strncmp(one, two, one_len);
}

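// Searches the binary search tree rooted at |trie| for a node whose name matches name/namelen
// exactly. If it is absent and alloc_if_needed is set, a new node is allocated and published
// with a release store so concurrent readers observe a fully constructed node.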
prop_trie_node* prop_area::find_prop_trie_node(prop_trie_node* const trie, const char* name,
                                               uint32_t namelen, bool alloc_if_needed) {
  prop_trie_node* current = trie;
  while (true) {
    if (!current) {
      return nullptr;
    }

    const int ret = cmp_prop_name(name, namelen, current->name, current->namelen);
    if (ret == 0) {
      return current;
    }

    if (ret < 0) {
      uint_least32_t left_offset = atomic_load_explicit(&current->left, memory_order_relaxed);
      if (left_offset != 0) {
        current = to_prop_trie_node(&current->left);
      } else {
        if (!alloc_if_needed) {
          return nullptr;
        }

        uint_least32_t new_offset;
        prop_trie_node* new_node = new_prop_trie_node(name, namelen, &new_offset);
        if (new_node) {
          atomic_store_explicit(&current->left, new_offset, memory_order_release);
        }
        return new_node;
      }
    } else {
      uint_least32_t right_offset = atomic_load_explicit(&current->right, memory_order_relaxed);
      if (right_offset != 0) {
        current = to_prop_trie_node(&current->right);
      } else {
        if (!alloc_if_needed) {
          return nullptr;
        }

        uint_least32_t new_offset;
        prop_trie_node* new_node = new_prop_trie_node(name, namelen, &new_offset);
        if (new_node) {
          atomic_store_explicit(&current->right, new_offset, memory_order_release);
        }
        return new_node;
      }
    }
  }
}

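// Walks the trie one dot-separated component of |name| at a time: each component is looked up
// (or created, when alloc_if_needed is set) in the current node's children tree. Once the final
// component is reached, the attached prop_info is returned, or a new one is created from
// value/valuelen when allocating.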
const prop_info* prop_area::find_property(prop_trie_node* const trie, const char* name,
                                          uint32_t namelen, const char* value, uint32_t valuelen,
                                          bool alloc_if_needed) {
  if (!trie) return nullptr;

  const char* remaining_name = name;
  prop_trie_node* current = trie;
  while (true) {
    const char* sep = strchr(remaining_name, '.');
    const bool want_subtree = (sep != nullptr);
    const uint32_t substr_size = (want_subtree) ? sep - remaining_name : strlen(remaining_name);

    if (!substr_size) {
      return nullptr;
    }

    prop_trie_node* root = nullptr;
    uint_least32_t children_offset = atomic_load_explicit(&current->children, memory_order_relaxed);
    if (children_offset != 0) {
      root = to_prop_trie_node(&current->children);
    } else if (alloc_if_needed) {
      uint_least32_t new_offset;
      root = new_prop_trie_node(remaining_name, substr_size, &new_offset);
      if (root) {
        atomic_store_explicit(&current->children, new_offset, memory_order_release);
      }
    }

    if (!root) {
      return nullptr;
    }

    current = find_prop_trie_node(root, remaining_name, substr_size, alloc_if_needed);
    if (!current) {
      return nullptr;
    }

    if (!want_subtree) break;

    remaining_name = sep + 1;
  }

  uint_least32_t prop_offset = atomic_load_explicit(&current->prop, memory_order_relaxed);
  if (prop_offset != 0) {
    return to_prop_info(&current->prop);
  } else if (alloc_if_needed) {
    uint_least32_t new_offset;
    prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_offset);
    if (new_info) {
      atomic_store_explicit(&current->prop, new_offset, memory_order_release);
    }

    return new_info;
  } else {
    return nullptr;
  }
}

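// Recursively visits every prop_info reachable from |trie|, calling propfn on each: left subtree
// first, then this node's property, then its children, then the right subtree.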
bool prop_area::foreach_property(prop_trie_node* const trie,
                                 void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
  if (!trie) return false;

  uint_least32_t left_offset = atomic_load_explicit(&trie->left, memory_order_relaxed);
  if (left_offset != 0) {
    const int err = foreach_property(to_prop_trie_node(&trie->left), propfn, cookie);
    if (err < 0) return false;
  }
  uint_least32_t prop_offset = atomic_load_explicit(&trie->prop, memory_order_relaxed);
  if (prop_offset != 0) {
    prop_info* info = to_prop_info(&trie->prop);
    if (!info) return false;
    propfn(info, cookie);
  }
  uint_least32_t children_offset = atomic_load_explicit(&trie->children, memory_order_relaxed);
  if (children_offset != 0) {
    const int err = foreach_property(to_prop_trie_node(&trie->children), propfn, cookie);
    if (err < 0) return false;
  }
  uint_least32_t right_offset = atomic_load_explicit(&trie->right, memory_order_relaxed);
  if (right_offset != 0) {
    const int err = foreach_property(to_prop_trie_node(&trie->right), propfn, cookie);
    if (err < 0) return false;
  }

  return true;
}

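// Public entry points, all operating on the trie anchored at the root node stored at offset 0 of
// the data area.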
const prop_info* prop_area::find(const char* name) {
  return find_property(root_node(), name, strlen(name), nullptr, 0, false);
}

bool prop_area::add(const char* name, unsigned int namelen, const char* value,
                    unsigned int valuelen) {
  return find_property(root_node(), name, namelen, value, valuelen, true);
}

bool prop_area::foreach (void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
  return foreach_property(root_node(), propfn, cookie);
}