1 /* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
2  * Use of this source code is governed by a BSD-style license that can be
3  * found in the LICENSE file.
4  */
5 
6 #define _BSD_SOURCE
7 #define _DEFAULT_SOURCE
8 #define _GNU_SOURCE
9 
10 #include <asm/unistd.h>
11 #include <ctype.h>
12 #include <dirent.h>
13 #include <errno.h>
14 #include <fcntl.h>
15 #include <grp.h>
16 #include <inttypes.h>
17 #include <limits.h>
18 #include <linux/capability.h>
19 #include <net/if.h>
20 #include <pwd.h>
21 #include <sched.h>
22 #include <signal.h>
23 #include <stdarg.h>
24 #include <stdbool.h>
25 #include <stddef.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/capability.h>
30 #include <sys/mount.h>
31 #include <sys/param.h>
32 #include <sys/prctl.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/user.h>
37 #include <sys/wait.h>
38 #include <syscall.h>
39 #include <unistd.h>
40 
41 #include "libminijail.h"
42 #include "libminijail-private.h"
43 
44 #include "signal_handler.h"
45 #include "syscall_filter.h"
46 #include "syscall_wrapper.h"
47 #include "util.h"
48 
49 #ifdef HAVE_SECUREBITS_H
50 # include <linux/securebits.h>
51 #else
52 # define SECURE_ALL_BITS	0x55
53 # define SECURE_ALL_LOCKS	(SECURE_ALL_BITS << 1)
54 #endif
55 /* For kernels < 4.3. */
56 #define OLD_SECURE_ALL_BITS	0x15
57 #define OLD_SECURE_ALL_LOCKS	(OLD_SECURE_ALL_BITS << 1)
58 
59 /*
60  * Assert the value of SECURE_ALL_BITS at compile-time.
61  * Brillo devices are currently compiled against 4.4 kernel headers. Kernel 4.3
62  * added a new securebit.
63  * When a new securebit is added, using the new SECURE_ALL_BITS mask on older
64  * kernels makes prctl(PR_SET_SECUREBITS) fail with EPERM. The static assert
65  * below catches this situation at compile time.
66  */
67 #ifdef __BRILLO__
68 _Static_assert(SECURE_ALL_BITS == 0x55, "SECURE_ALL_BITS == 0x55.");
69 #endif
70 
71 /* Until these are reliably available in linux/prctl.h. */
72 #ifndef PR_SET_SECCOMP
73 # define PR_SET_SECCOMP 22
74 #endif
75 
76 #ifndef PR_ALT_SYSCALL
77 # define PR_ALT_SYSCALL 0x43724f53
78 #endif
79 
80 /* Seccomp filter related flags. */
81 #ifndef PR_SET_NO_NEW_PRIVS
82 # define PR_SET_NO_NEW_PRIVS 38
83 #endif
84 
85 #ifndef SECCOMP_MODE_FILTER
86 # define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */
87 #endif
88 
89 #ifndef SECCOMP_SET_MODE_STRICT
90 # define SECCOMP_SET_MODE_STRICT 0
91 #endif
92 #ifndef SECCOMP_SET_MODE_FILTER
93 # define SECCOMP_SET_MODE_FILTER 1
94 #endif
95 
96 #ifndef SECCOMP_FILTER_FLAG_TSYNC
97 # define SECCOMP_FILTER_FLAG_TSYNC 1
98 #endif
99 /* End seccomp filter related flags. */
100 
101 /* New cgroup namespace might not be in linux-headers yet. */
102 #ifndef CLONE_NEWCGROUP
103 # define CLONE_NEWCGROUP 0x02000000
104 #endif
105 
106 #define MAX_CGROUPS 10 /* 10 different controllers supported by Linux. */
107 
108 /* Keyctl commands. */
109 #define KEYCTL_JOIN_SESSION_KEYRING 1
110 
111 struct mountpoint {
112 	char *src;
113 	char *dest;
114 	char *type;
115 	char *data;
116 	int has_data;
117 	unsigned long flags;
118 	struct mountpoint *next;
119 };
120 
121 struct minijail {
122 	/*
123 	 * WARNING: if you add a flag here you need to make sure it's
124 	 * accounted for in minijail_pre{enter|exec}() below.
125 	 */
126 	struct {
127 		int uid : 1;
128 		int gid : 1;
129 		int inherit_suppl_gids : 1;
130 		int set_suppl_gids : 1;
131 		int keep_suppl_gids : 1;
132 		int use_caps : 1;
133 		int capbset_drop : 1;
134 		int vfs : 1;
135 		int enter_vfs : 1;
136 		int skip_remount_private : 1;
137 		int pids : 1;
138 		int ipc : 1;
139 		int net : 1;
140 		int enter_net : 1;
141 		int ns_cgroups : 1;
142 		int userns : 1;
143 		int disable_setgroups : 1;
144 		int seccomp : 1;
145 		int remount_proc_ro : 1;
146 		int no_new_privs : 1;
147 		int seccomp_filter : 1;
148 		int seccomp_filter_tsync : 1;
149 		int seccomp_filter_logging : 1;
150 		int chroot : 1;
151 		int pivot_root : 1;
152 		int mount_tmp : 1;
153 		int do_init : 1;
154 		int pid_file : 1;
155 		int cgroups : 1;
156 		int alt_syscall : 1;
157 		int reset_signal_mask : 1;
158 		int close_open_fds : 1;
159 		int new_session_keyring : 1;
160 	} flags;
161 	uid_t uid;
162 	gid_t gid;
163 	gid_t usergid;
164 	char *user;
165 	size_t suppl_gid_count;
166 	gid_t *suppl_gid_list;
167 	uint64_t caps;
168 	uint64_t cap_bset;
169 	pid_t initpid;
170 	int mountns_fd;
171 	int netns_fd;
172 	char *chrootdir;
173 	char *pid_file_path;
174 	char *uidmap;
175 	char *gidmap;
176 	size_t filter_len;
177 	struct sock_fprog *filter_prog;
178 	char *alt_syscall_table;
179 	struct mountpoint *mounts_head;
180 	struct mountpoint *mounts_tail;
181 	size_t mounts_count;
182 	size_t tmpfs_size;
183 	char *cgroups[MAX_CGROUPS];
184 	size_t cgroup_count;
185 };
186 
187 /*
188  * Strip out flags meant for the parent.
189  * We keep things that are not inherited across execve(2) (e.g. capabilities),
190  * or are easier to set after execve(2) (e.g. seccomp filters).
191  */
192 void minijail_preenter(struct minijail *j)
193 {
194 	j->flags.vfs = 0;
195 	j->flags.enter_vfs = 0;
196 	j->flags.skip_remount_private = 0;
197 	j->flags.remount_proc_ro = 0;
198 	j->flags.pids = 0;
199 	j->flags.do_init = 0;
200 	j->flags.pid_file = 0;
201 	j->flags.cgroups = 0;
202 }
203 
204 /*
205  * Strip out flags meant for the child.
206  * We keep things that are inherited across execve(2).
207  */
208 void minijail_preexec(struct minijail *j)
209 {
210 	int vfs = j->flags.vfs;
211 	int enter_vfs = j->flags.enter_vfs;
212 	int skip_remount_private = j->flags.skip_remount_private;
213 	int remount_proc_ro = j->flags.remount_proc_ro;
214 	int userns = j->flags.userns;
215 	if (j->user)
216 		free(j->user);
217 	j->user = NULL;
218 	if (j->suppl_gid_list)
219 		free(j->suppl_gid_list);
220 	j->suppl_gid_list = NULL;
221 	memset(&j->flags, 0, sizeof(j->flags));
222 	/* Now restore anything we meant to keep. */
223 	j->flags.vfs = vfs;
224 	j->flags.enter_vfs = enter_vfs;
225 	j->flags.skip_remount_private = skip_remount_private;
226 	j->flags.remount_proc_ro = remount_proc_ro;
227 	j->flags.userns = userns;
228 	/* Note, |pids| will already have been used before this call. */
229 }
230 
231 /* Minijail API. */
232 
233 struct minijail API *minijail_new(void)
234 {
235 	return calloc(1, sizeof(struct minijail));
236 }
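
/*
 * Illustrative sketch, not part of the library: how a caller might combine
 * the setters below with minijail_enter().  The uid/gid values are
 * hypothetical, and minijail_destroy() is assumed to be declared in
 * libminijail.h.
 *
 *   struct minijail *j = minijail_new();
 *   if (!j)
 *     return -ENOMEM;
 *   minijail_change_uid(j, 1000);
 *   minijail_change_gid(j, 1000);
 *   minijail_namespace_vfs(j);
 *   minijail_enter(j);      // applies the config; aborts on failure
 *   minijail_destroy(j);    // the config is no longer needed once entered
 */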
237 
238 void API minijail_change_uid(struct minijail *j, uid_t uid)
239 {
240 	if (uid == 0)
241 		die("useless change to uid 0");
242 	j->uid = uid;
243 	j->flags.uid = 1;
244 }
245 
246 void API minijail_change_gid(struct minijail *j, gid_t gid)
247 {
248 	if (gid == 0)
249 		die("useless change to gid 0");
250 	j->gid = gid;
251 	j->flags.gid = 1;
252 }
253 
254 void API minijail_set_supplementary_gids(struct minijail *j, size_t size,
255 					 const gid_t *list)
256 {
257 	size_t i;
258 
259 	if (j->flags.inherit_suppl_gids)
260 		die("cannot inherit *and* set supplementary groups");
261 	if (j->flags.keep_suppl_gids)
262 		die("cannot keep *and* set supplementary groups");
263 
264 	if (size == 0) {
265 		/* Clear supplementary groups. */
266 		j->suppl_gid_list = NULL;
267 		j->suppl_gid_count = 0;
268 		j->flags.set_suppl_gids = 1;
269 		return;
270 	}
271 
272 	/* Copy the gid_t array. */
273 	j->suppl_gid_list = calloc(size, sizeof(gid_t));
274 	if (!j->suppl_gid_list) {
275 		die("failed to allocate internal supplementary group array");
276 	}
277 	for (i = 0; i < size; i++) {
278 		j->suppl_gid_list[i] = list[i];
279 	}
280 	j->suppl_gid_count = size;
281 	j->flags.set_suppl_gids = 1;
282 }
283 
284 void API minijail_keep_supplementary_gids(struct minijail *j) {
285 	j->flags.keep_suppl_gids = 1;
286 }
287 
288 int API minijail_change_user(struct minijail *j, const char *user)
289 {
290 	char *buf = NULL;
291 	struct passwd pw;
292 	struct passwd *ppw = NULL;
293 	ssize_t sz = sysconf(_SC_GETPW_R_SIZE_MAX);
294 	if (sz == -1)
295 		sz = 65536;	/* your guess is as good as mine... */
296 
297 	/*
298 	 * sysconf(_SC_GETPW_R_SIZE_MAX), under glibc, is documented to return
299 	 * the maximum needed size of the buffer, so we don't have to search.
300 	 */
301 	buf = malloc(sz);
302 	if (!buf)
303 		return -ENOMEM;
304 	getpwnam_r(user, &pw, buf, sz, &ppw);
305 	/*
306 	 * We're safe to free the buffer here. The strings inside |pw| point
307 	 * inside |buf|, but we don't use any of them; this leaves the pointers
308 	 * dangling but it's safe. |ppw| points at |pw| if getpwnam_r(3)
309 	 * succeeded.
310 	 */
311 	free(buf);
312 	/* getpwnam_r(3) does *not* set errno when |ppw| is NULL. */
313 	if (!ppw)
314 		return -1;
315 	minijail_change_uid(j, ppw->pw_uid);
316 	j->user = strdup(user);
317 	if (!j->user)
318 		return -ENOMEM;
319 	j->usergid = ppw->pw_gid;
320 	return 0;
321 }
322 
323 int API minijail_change_group(struct minijail *j, const char *group)
324 {
325 	char *buf = NULL;
326 	struct group gr;
327 	struct group *pgr = NULL;
328 	ssize_t sz = sysconf(_SC_GETGR_R_SIZE_MAX);
329 	if (sz == -1)
330 		sz = 65536;	/* and mine is as good as yours, really */
331 
332 	/*
333 	 * sysconf(_SC_GETGR_R_SIZE_MAX), under glibc, is documented to return
334 	 * the maximum needed size of the buffer, so we don't have to search.
335 	 */
336 	buf = malloc(sz);
337 	if (!buf)
338 		return -ENOMEM;
339 	getgrnam_r(group, &gr, buf, sz, &pgr);
340 	/*
341 	 * We're safe to free the buffer here. The strings inside gr point
342 	 * inside buf, but we don't use any of them; this leaves the pointers
343 	 * dangling but it's safe. pgr points at gr if getgrnam_r succeeded.
344 	 */
345 	free(buf);
346 	/* getgrnam_r(3) does *not* set errno when |pgr| is NULL. */
347 	if (!pgr)
348 		return -1;
349 	minijail_change_gid(j, pgr->gr_gid);
350 	return 0;
351 }
352 
353 void API minijail_use_seccomp(struct minijail *j)
354 {
355 	j->flags.seccomp = 1;
356 }
357 
358 void API minijail_no_new_privs(struct minijail *j)
359 {
360 	j->flags.no_new_privs = 1;
361 }
362 
363 void API minijail_use_seccomp_filter(struct minijail *j)
364 {
365 	j->flags.seccomp_filter = 1;
366 }
367 
368 void API minijail_set_seccomp_filter_tsync(struct minijail *j)
369 {
370 	if (j->filter_len > 0 && j->filter_prog != NULL) {
371 		die("minijail_set_seccomp_filter_tsync() must be called "
372 		    "before minijail_parse_seccomp_filters()");
373 	}
374 	j->flags.seccomp_filter_tsync = 1;
375 }
376 
377 void API minijail_log_seccomp_filter_failures(struct minijail *j)
378 {
379 	if (j->filter_len > 0 && j->filter_prog != NULL) {
380 		die("minijail_log_seccomp_filter_failures() must be called "
381 		    "before minijail_parse_seccomp_filters()");
382 	}
383 	j->flags.seccomp_filter_logging = 1;
384 }
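
/*
 * Ordering sketch (illustrative): both setters above must run before the
 * policy is compiled (they die() otherwise).  "policy.txt" is a hypothetical
 * policy file path.
 *
 *   minijail_use_seccomp_filter(j);
 *   minijail_log_seccomp_filter_failures(j);          // optional
 *   minijail_parse_seccomp_filters(j, "policy.txt");  // compiles the BPF
 */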
385 
386 void API minijail_use_caps(struct minijail *j, uint64_t capmask)
387 {
388 	/*
389 	 * 'minijail_use_caps' configures a runtime-capabilities-only
390 	 * environment, including a bounding set matching the thread's runtime
391 	 * (permitted|inheritable|effective) sets.
392 	 * Therefore, it will override any existing bounding set configurations
393 	 * since the latter would allow gaining extra runtime capabilities from
394 	 * file capabilities.
395 	 */
396 	if (j->flags.capbset_drop) {
397 		warn("overriding bounding set configuration");
398 		j->cap_bset = 0;
399 		j->flags.capbset_drop = 0;
400 	}
401 	j->caps = capmask;
402 	j->flags.use_caps = 1;
403 }
404 
405 void API minijail_capbset_drop(struct minijail *j, uint64_t capmask)
406 {
407 	if (j->flags.use_caps) {
408 		/*
409 		 * 'minijail_use_caps' will have already configured a capability
410 		 * bounding set matching the (permitted|inheritable|effective)
411 		 * sets. Abort if the user tries to configure a separate
412 		 * bounding set. 'minijail_capbset_drop' and 'minijail_use_caps'
413 		 * are mutually exclusive.
414 		 */
415 		die("runtime capabilities already configured, can't drop "
416 		    "bounding set separately");
417 	}
418 	j->cap_bset = capmask;
419 	j->flags.capbset_drop = 1;
420 }
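
/*
 * Illustrative sketch: the two capability entry points are mutually
 * exclusive.  minijail_use_caps() keeps only the listed caps in the runtime
 * and bounding sets, while minijail_capbset_drop() keeps them in the bounding
 * set only.  CAP_NET_BIND_SERVICE here is just an example capability.
 *
 *   minijail_use_caps(j, UINT64_C(1) << CAP_NET_BIND_SERVICE);
 *   // ...or, to leave runtime caps alone and only prune the bounding set:
 *   // minijail_capbset_drop(j, UINT64_C(1) << CAP_NET_BIND_SERVICE);
 */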
421 
422 void API minijail_reset_signal_mask(struct minijail *j)
423 {
424 	j->flags.reset_signal_mask = 1;
425 }
426 
427 void API minijail_namespace_vfs(struct minijail *j)
428 {
429 	j->flags.vfs = 1;
430 }
431 
432 void API minijail_namespace_enter_vfs(struct minijail *j, const char *ns_path)
433 {
434 	int ns_fd = open(ns_path, O_RDONLY | O_CLOEXEC);
435 	if (ns_fd < 0) {
436 		pdie("failed to open namespace '%s'", ns_path);
437 	}
438 	j->mountns_fd = ns_fd;
439 	j->flags.enter_vfs = 1;
440 }
441 
442 void API minijail_new_session_keyring(struct minijail *j)
443 {
444 	j->flags.new_session_keyring = 1;
445 }
446 
447 void API minijail_skip_remount_private(struct minijail *j)
448 {
449 	j->flags.skip_remount_private = 1;
450 }
451 
452 void API minijail_namespace_pids(struct minijail *j)
453 {
454 	j->flags.vfs = 1;
455 	j->flags.remount_proc_ro = 1;
456 	j->flags.pids = 1;
457 	j->flags.do_init = 1;
458 }
459 
460 void API minijail_namespace_ipc(struct minijail *j)
461 {
462 	j->flags.ipc = 1;
463 }
464 
465 void API minijail_namespace_net(struct minijail *j)
466 {
467 	j->flags.net = 1;
468 }
469 
470 void API minijail_namespace_enter_net(struct minijail *j, const char *ns_path)
471 {
472 	int ns_fd = open(ns_path, O_RDONLY | O_CLOEXEC);
473 	if (ns_fd < 0) {
474 		pdie("failed to open namespace '%s'", ns_path);
475 	}
476 	j->netns_fd = ns_fd;
477 	j->flags.enter_net = 1;
478 }
479 
480 void API minijail_namespace_cgroups(struct minijail *j)
481 {
482 	j->flags.ns_cgroups = 1;
483 }
484 
485 void API minijail_close_open_fds(struct minijail *j)
486 {
487 	j->flags.close_open_fds = 1;
488 }
489 
490 void API minijail_remount_proc_readonly(struct minijail *j)
491 {
492 	j->flags.vfs = 1;
493 	j->flags.remount_proc_ro = 1;
494 }
495 
496 void API minijail_namespace_user(struct minijail *j)
497 {
498 	j->flags.userns = 1;
499 }
500 
501 void API minijail_namespace_user_disable_setgroups(struct minijail *j)
502 {
503 	j->flags.disable_setgroups = 1;
504 }
505 
506 int API minijail_uidmap(struct minijail *j, const char *uidmap)
507 {
508 	j->uidmap = strdup(uidmap);
509 	if (!j->uidmap)
510 		return -ENOMEM;
511 	char *ch;
512 	for (ch = j->uidmap; *ch; ch++) {
513 		if (*ch == ',')
514 			*ch = '\n';
515 	}
516 	return 0;
517 }
518 
519 int API minijail_gidmap(struct minijail *j, const char *gidmap)
520 {
521 	j->gidmap = strdup(gidmap);
522 	if (!j->gidmap)
523 		return -ENOMEM;
524 	char *ch;
525 	for (ch = j->gidmap; *ch; ch++) {
526 		if (*ch == ',')
527 			*ch = '\n';
528 	}
529 	return 0;
530 }
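
/*
 * Map format sketch: each entry uses the kernel's uid_map/gid_map syntax,
 * "<id inside ns> <id outside ns> <count>", and multiple entries are
 * separated by commas (translated to newlines above).  The ids below are
 * hypothetical.
 *
 *   minijail_namespace_user(j);
 *   minijail_uidmap(j, "0 1000 1");
 *   minijail_gidmap(j, "0 1000 1,100 2000 1");
 */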
531 
532 void API minijail_inherit_usergroups(struct minijail *j)
533 {
534 	j->flags.inherit_suppl_gids = 1;
535 }
536 
537 void API minijail_run_as_init(struct minijail *j)
538 {
539 	/*
540 	 * Since the jailed program will become 'init' in the new PID namespace,
541 	 * Minijail does not need to fork an 'init' process.
542 	 */
543 	j->flags.do_init = 0;
544 }
545 
546 int API minijail_enter_chroot(struct minijail *j, const char *dir)
547 {
548 	if (j->chrootdir)
549 		return -EINVAL;
550 	j->chrootdir = strdup(dir);
551 	if (!j->chrootdir)
552 		return -ENOMEM;
553 	j->flags.chroot = 1;
554 	return 0;
555 }
556 
557 int API minijail_enter_pivot_root(struct minijail *j, const char *dir)
558 {
559 	if (j->chrootdir)
560 		return -EINVAL;
561 	j->chrootdir = strdup(dir);
562 	if (!j->chrootdir)
563 		return -ENOMEM;
564 	j->flags.pivot_root = 1;
565 	return 0;
566 }
567 
568 char API *minijail_get_original_path(struct minijail *j,
569 				     const char *path_inside_chroot)
570 {
571 	struct mountpoint *b;
572 
573 	b = j->mounts_head;
574 	while (b) {
575 		/*
576 		 * If |path_inside_chroot| is the exact destination of a
577 		 * mount, then the original path is exactly the source of
578 		 * the mount.
579 		 *  for example: "-b /some/path/exe,/chroot/path/exe"
580 		 *    mount source = /some/path/exe, mount dest =
581 		 *    /chroot/path/exe Then when getting the original path of
582 		 *    "/chroot/path/exe", the source of that mount,
583 		 *    "/some/path/exe" is what should be returned.
584 		 */
585 		if (!strcmp(b->dest, path_inside_chroot))
586 			return strdup(b->src);
587 
588 		/*
589 		 * If |path_inside_chroot| is within the destination path of a
590 		 * mount, take the suffix of the chroot path relative to the
591 		 * mount destination path, and append it to the mount source
592 		 * path.
593 		 */
594 		if (!strncmp(b->dest, path_inside_chroot, strlen(b->dest))) {
595 			const char *relative_path =
596 				path_inside_chroot + strlen(b->dest);
597 			return path_join(b->src, relative_path);
598 		}
599 		b = b->next;
600 	}
601 
602 	/* If there is a chroot path, append |path_inside_chroot| to that. */
603 	if (j->chrootdir)
604 		return path_join(j->chrootdir, path_inside_chroot);
605 
606 	/* No chroot, so the path outside is the same as it is inside. */
607 	return strdup(path_inside_chroot);
608 }
609 
610 size_t minijail_get_tmpfs_size(const struct minijail *j)
611 {
612 	return j->tmpfs_size;
613 }
614 
615 void API minijail_mount_tmp(struct minijail *j)
616 {
617 	minijail_mount_tmp_size(j, 64 * 1024 * 1024);
618 }
619 
620 void API minijail_mount_tmp_size(struct minijail *j, size_t size)
621 {
622 	j->tmpfs_size = size;
623 	j->flags.mount_tmp = 1;
624 }
625 
626 int API minijail_write_pid_file(struct minijail *j, const char *path)
627 {
628 	j->pid_file_path = strdup(path);
629 	if (!j->pid_file_path)
630 		return -ENOMEM;
631 	j->flags.pid_file = 1;
632 	return 0;
633 }
634 
635 int API minijail_add_to_cgroup(struct minijail *j, const char *path)
636 {
637 	if (j->cgroup_count >= MAX_CGROUPS)
638 		return -ENOMEM;
639 	j->cgroups[j->cgroup_count] = strdup(path);
640 	if (!j->cgroups[j->cgroup_count])
641 		return -ENOMEM;
642 	j->cgroup_count++;
643 	j->flags.cgroups = 1;
644 	return 0;
645 }
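
/*
 * Illustrative sketch: |path| is expected to be a file that accepts a pid,
 * e.g. a cgroup tasks file (the mount point shown is hypothetical).  The
 * child's pid is written there by the parent in add_to_cgroups_or_die()
 * below.
 *
 *   minijail_add_to_cgroup(j, "/sys/fs/cgroup/cpu/jail/tasks");
 */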
646 
647 int API minijail_mount_with_data(struct minijail *j, const char *src,
648 				 const char *dest, const char *type,
649 				 unsigned long flags, const char *data)
650 {
651 	struct mountpoint *m;
652 
653 	if (*dest != '/')
654 		return -EINVAL;
655 	m = calloc(1, sizeof(*m));
656 	if (!m)
657 		return -ENOMEM;
658 	m->dest = strdup(dest);
659 	if (!m->dest)
660 		goto error;
661 	m->src = strdup(src);
662 	if (!m->src)
663 		goto error;
664 	m->type = strdup(type);
665 	if (!m->type)
666 		goto error;
667 	if (data) {
668 		m->data = strdup(data);
669 		if (!m->data)
670 			goto error;
671 		m->has_data = 1;
672 	}
673 	m->flags = flags;
674 
675 	info("mount %s -> %s type '%s'", src, dest, type);
676 
677 	/*
678 	 * Force vfs namespacing so the mounts don't leak out into the
679 	 * containing vfs namespace.
680 	 */
681 	minijail_namespace_vfs(j);
682 
683 	if (j->mounts_tail)
684 		j->mounts_tail->next = m;
685 	else
686 		j->mounts_head = m;
687 	j->mounts_tail = m;
688 	j->mounts_count++;
689 
690 	return 0;
691 
692 error:
693 	free(m->type);
694 	free(m->src);
695 	free(m->dest);
696 	free(m);
697 	return -ENOMEM;
698 }
699 
700 int API minijail_mount(struct minijail *j, const char *src, const char *dest,
701 		       const char *type, unsigned long flags)
702 {
703 	return minijail_mount_with_data(j, src, dest, type, flags, NULL);
704 }
705 
706 int API minijail_bind(struct minijail *j, const char *src, const char *dest,
707 		      int writeable)
708 {
709 	unsigned long flags = MS_BIND;
710 
711 	if (!writeable)
712 		flags |= MS_RDONLY;
713 
714 	return minijail_mount(j, src, dest, "", flags);
715 }
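
/*
 * Illustrative sketch: read-only and writeable bind mounts into a chroot.
 * The paths are hypothetical; each call forces a mount namespace via
 * minijail_namespace_vfs(), and missing destinations are created by
 * setup_mount_destination() below.
 *
 *   minijail_enter_chroot(j, "/var/empty");
 *   minijail_bind(j, "/lib", "/lib", 0);              // read-only
 *   minijail_bind(j, "/tmp/scratch", "/scratch", 1);  // writeable
 *   minijail_mount_tmp(j);                            // 64 MiB tmpfs on /tmp
 */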
716 
717 static void clear_seccomp_options(struct minijail *j)
718 {
719 	j->flags.seccomp_filter = 0;
720 	j->flags.seccomp_filter_tsync = 0;
721 	j->flags.seccomp_filter_logging = 0;
722 	j->filter_len = 0;
723 	j->filter_prog = NULL;
724 	j->flags.no_new_privs = 0;
725 }
726 
727 static int seccomp_should_parse_filters(struct minijail *j)
728 {
729 	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL) == -1) {
730 		/*
731 		 * |errno| will be set to EINVAL when seccomp has not been
732 		 * compiled into the kernel. On certain platforms and kernel
733 		 * versions this is not a fatal failure. In that case, and only
734 		 * in that case, disable seccomp and skip loading the filters.
735 		 */
736 		if ((errno == EINVAL) && seccomp_can_softfail()) {
737 			warn("not loading seccomp filters, seccomp filter not "
738 			     "supported");
739 			clear_seccomp_options(j);
740 			return 0;
741 		}
742 		/*
743 		 * If |errno| != EINVAL or seccomp_can_softfail() is false,
744 		 * we can proceed. Worst case scenario minijail_enter() will
745 		 * abort() if seccomp fails.
746 		 */
747 	}
748 	if (j->flags.seccomp_filter_tsync) {
749 		/* Are the seccomp(2) syscall and the TSYNC option supported? */
750 		if (sys_seccomp(SECCOMP_SET_MODE_FILTER,
751 				SECCOMP_FILTER_FLAG_TSYNC, NULL) == -1) {
752 			int saved_errno = errno;
753 			if (saved_errno == ENOSYS && seccomp_can_softfail()) {
754 				warn("seccomp(2) syscall not supported");
755 				clear_seccomp_options(j);
756 				return 0;
757 			} else if (saved_errno == EINVAL &&
758 				   seccomp_can_softfail()) {
759 				warn(
760 				    "seccomp filter thread sync not supported");
761 				clear_seccomp_options(j);
762 				return 0;
763 			}
764 			/*
765 			 * Similar logic here. If seccomp_can_softfail() is
766 			 * false, or |errno| != ENOSYS, or |errno| != EINVAL,
767 			 * we can proceed. Worst case scenario minijail_enter()
768 			 * will abort() if seccomp or TSYNC fail.
769 			 */
770 		}
771 	}
772 	return 1;
773 }
774 
775 static int parse_seccomp_filters(struct minijail *j, FILE *policy_file)
776 {
777 	struct sock_fprog *fprog = malloc(sizeof(struct sock_fprog));
778 	int use_ret_trap =
779 	    j->flags.seccomp_filter_tsync || j->flags.seccomp_filter_logging;
780 	int allow_logging = j->flags.seccomp_filter_logging;
781 
782 	if (compile_filter(policy_file, fprog, use_ret_trap, allow_logging)) {
783 		free(fprog);
784 		return -1;
785 	}
786 
787 	j->filter_len = fprog->len;
788 	j->filter_prog = fprog;
789 	return 0;
790 }
791 
792 void API minijail_parse_seccomp_filters(struct minijail *j, const char *path)
793 {
794 	if (!seccomp_should_parse_filters(j))
795 		return;
796 
797 	FILE *file = fopen(path, "r");
798 	if (!file) {
799 		pdie("failed to open seccomp filter file '%s'", path);
800 	}
801 
802 	if (parse_seccomp_filters(j, file) != 0) {
803 		die("failed to compile seccomp filter BPF program in '%s'",
804 		    path);
805 	}
806 	fclose(file);
807 }
808 
809 void API minijail_parse_seccomp_filters_from_fd(struct minijail *j, int fd)
810 {
811 	if (!seccomp_should_parse_filters(j))
812 		return;
813 
814 	FILE *file = fdopen(fd, "r");
815 	if (!file) {
816 		pdie("failed to associate stream with fd %d", fd);
817 	}
818 
819 	if (parse_seccomp_filters(j, file) != 0) {
820 		die("failed to compile seccomp filter BPF program from fd %d",
821 		    fd);
822 	}
823 	fclose(file);
824 }
825 
826 int API minijail_use_alt_syscall(struct minijail *j, const char *table)
827 {
828 	j->alt_syscall_table = strdup(table);
829 	if (!j->alt_syscall_table)
830 		return -ENOMEM;
831 	j->flags.alt_syscall = 1;
832 	return 0;
833 }
834 
835 struct marshal_state {
836 	size_t available;
837 	size_t total;
838 	char *buf;
839 };
840 
841 void marshal_state_init(struct marshal_state *state, char *buf,
842 			size_t available)
843 {
844 	state->available = available;
845 	state->buf = buf;
846 	state->total = 0;
847 }
848 
849 void marshal_append(struct marshal_state *state, void *src, size_t length)
850 {
851 	size_t copy_len = MIN(state->available, length);
852 
853 	/* Up to |available| will be written. */
854 	if (copy_len) {
855 		memcpy(state->buf, src, copy_len);
856 		state->buf += copy_len;
857 		state->available -= copy_len;
858 	}
859 	/* |total| will contain the expected length. */
860 	state->total += length;
861 }
862 
863 void marshal_mount(struct marshal_state *state, const struct mountpoint *m)
864 {
865 	marshal_append(state, m->src, strlen(m->src) + 1);
866 	marshal_append(state, m->dest, strlen(m->dest) + 1);
867 	marshal_append(state, m->type, strlen(m->type) + 1);
868 	marshal_append(state, (char *)&m->has_data, sizeof(m->has_data));
869 	if (m->has_data)
870 		marshal_append(state, m->data, strlen(m->data) + 1);
871 	marshal_append(state, (char *)&m->flags, sizeof(m->flags));
872 }
873 
874 void minijail_marshal_helper(struct marshal_state *state,
875 			     const struct minijail *j)
876 {
877 	struct mountpoint *m = NULL;
878 	size_t i;
879 
880 	marshal_append(state, (char *)j, sizeof(*j));
881 	if (j->user)
882 		marshal_append(state, j->user, strlen(j->user) + 1);
883 	if (j->suppl_gid_list) {
884 		marshal_append(state, j->suppl_gid_list,
885 			       j->suppl_gid_count * sizeof(gid_t));
886 	}
887 	if (j->chrootdir)
888 		marshal_append(state, j->chrootdir, strlen(j->chrootdir) + 1);
889 	if (j->alt_syscall_table) {
890 		marshal_append(state, j->alt_syscall_table,
891 			       strlen(j->alt_syscall_table) + 1);
892 	}
893 	if (j->flags.seccomp_filter && j->filter_prog) {
894 		struct sock_fprog *fp = j->filter_prog;
895 		marshal_append(state, (char *)fp->filter,
896 			       fp->len * sizeof(struct sock_filter));
897 	}
898 	for (m = j->mounts_head; m; m = m->next) {
899 		marshal_mount(state, m);
900 	}
901 	for (i = 0; i < j->cgroup_count; ++i)
902 		marshal_append(state, j->cgroups[i], strlen(j->cgroups[i]) + 1);
903 }
904 
905 size_t API minijail_size(const struct minijail *j)
906 {
907 	struct marshal_state state;
908 	marshal_state_init(&state, NULL, 0);
909 	minijail_marshal_helper(&state, j);
910 	return state.total;
911 }
912 
913 int minijail_marshal(const struct minijail *j, char *buf, size_t available)
914 {
915 	struct marshal_state state;
916 	marshal_state_init(&state, buf, available);
917 	minijail_marshal_helper(&state, j);
918 	return (state.total > available);
919 }
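
/*
 * Marshaling sketch (illustrative): minijail_size() reports the required
 * buffer size, minijail_marshal() serializes into it, and
 * minijail_unmarshal() below rebuilds a jail from those bytes (the fd-based
 * variants are minijail_to_fd()/minijail_from_fd()).
 *
 *   size_t sz = minijail_size(j);
 *   char *buf = malloc(sz);
 *   if (buf && !minijail_marshal(j, buf, sz)) {
 *     struct minijail *copy = minijail_new();
 *     if (copy && !minijail_unmarshal(copy, buf, sz)) {
 *       // |copy| now mirrors |j|
 *     }
 *   }
 *   free(buf);
 */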
920 
921 int minijail_unmarshal(struct minijail *j, char *serialized, size_t length)
922 {
923 	size_t i;
924 	size_t count;
925 	int ret = -EINVAL;
926 
927 	if (length < sizeof(*j))
928 		goto out;
929 	memcpy((void *)j, serialized, sizeof(*j));
930 	serialized += sizeof(*j);
931 	length -= sizeof(*j);
932 
933 	/* Potentially stale pointers not used as signals. */
934 	j->pid_file_path = NULL;
935 	j->uidmap = NULL;
936 	j->gidmap = NULL;
937 	j->mounts_head = NULL;
938 	j->mounts_tail = NULL;
939 	j->filter_prog = NULL;
940 
941 	if (j->user) {		/* stale pointer */
942 		char *user = consumestr(&serialized, &length);
943 		if (!user)
944 			goto clear_pointers;
945 		j->user = strdup(user);
946 		if (!j->user)
947 			goto clear_pointers;
948 	}
949 
950 	if (j->suppl_gid_list) {	/* stale pointer */
951 		if (j->suppl_gid_count > NGROUPS_MAX) {
952 			goto bad_gid_list;
953 		}
954 		size_t gid_list_size = j->suppl_gid_count * sizeof(gid_t);
955 		void *gid_list_bytes =
956 		    consumebytes(gid_list_size, &serialized, &length);
957 		if (!gid_list_bytes)
958 			goto bad_gid_list;
959 
960 		j->suppl_gid_list = calloc(j->suppl_gid_count, sizeof(gid_t));
961 		if (!j->suppl_gid_list)
962 			goto bad_gid_list;
963 
964 		memcpy(j->suppl_gid_list, gid_list_bytes, gid_list_size);
965 	}
966 
967 	if (j->chrootdir) {	/* stale pointer */
968 		char *chrootdir = consumestr(&serialized, &length);
969 		if (!chrootdir)
970 			goto bad_chrootdir;
971 		j->chrootdir = strdup(chrootdir);
972 		if (!j->chrootdir)
973 			goto bad_chrootdir;
974 	}
975 
976 	if (j->alt_syscall_table) {	/* stale pointer */
977 		char *alt_syscall_table = consumestr(&serialized, &length);
978 		if (!alt_syscall_table)
979 			goto bad_syscall_table;
980 		j->alt_syscall_table = strdup(alt_syscall_table);
981 		if (!j->alt_syscall_table)
982 			goto bad_syscall_table;
983 	}
984 
985 	if (j->flags.seccomp_filter && j->filter_len > 0) {
986 		size_t ninstrs = j->filter_len;
987 		if (ninstrs > (SIZE_MAX / sizeof(struct sock_filter)) ||
988 		    ninstrs > USHRT_MAX)
989 			goto bad_filters;
990 
991 		size_t program_len = ninstrs * sizeof(struct sock_filter);
992 		void *program = consumebytes(program_len, &serialized, &length);
993 		if (!program)
994 			goto bad_filters;
995 
996 		j->filter_prog = malloc(sizeof(struct sock_fprog));
997 		if (!j->filter_prog)
998 			goto bad_filters;
999 
1000 		j->filter_prog->len = ninstrs;
1001 		j->filter_prog->filter = malloc(program_len);
1002 		if (!j->filter_prog->filter)
1003 			goto bad_filter_prog_instrs;
1004 
1005 		memcpy(j->filter_prog->filter, program, program_len);
1006 	}
1007 
1008 	count = j->mounts_count;
1009 	j->mounts_count = 0;
1010 	for (i = 0; i < count; ++i) {
1011 		unsigned long *flags;
1012 		int *has_data;
1013 		const char *dest;
1014 		const char *type;
1015 		const char *data = NULL;
1016 		const char *src = consumestr(&serialized, &length);
1017 		if (!src)
1018 			goto bad_mounts;
1019 		dest = consumestr(&serialized, &length);
1020 		if (!dest)
1021 			goto bad_mounts;
1022 		type = consumestr(&serialized, &length);
1023 		if (!type)
1024 			goto bad_mounts;
1025 		has_data = consumebytes(sizeof(*has_data), &serialized,
1026 					&length);
1027 		if (!has_data)
1028 			goto bad_mounts;
1029 		if (*has_data) {
1030 			data = consumestr(&serialized, &length);
1031 			if (!data)
1032 				goto bad_mounts;
1033 		}
1034 		flags = consumebytes(sizeof(*flags), &serialized, &length);
1035 		if (!flags)
1036 			goto bad_mounts;
1037 		if (minijail_mount_with_data(j, src, dest, type, *flags, data))
1038 			goto bad_mounts;
1039 	}
1040 
1041 	count = j->cgroup_count;
1042 	j->cgroup_count = 0;
1043 	for (i = 0; i < count; ++i) {
1044 		char *cgroup = consumestr(&serialized, &length);
1045 		if (!cgroup)
1046 			goto bad_cgroups;
1047 		j->cgroups[i] = strdup(cgroup);
1048 		if (!j->cgroups[i])
1049 			goto bad_cgroups;
1050 		++j->cgroup_count;
1051 	}
1052 
1053 	return 0;
1054 
1055 bad_cgroups:
1056 	while (j->mounts_head) {
1057 		struct mountpoint *m = j->mounts_head;
1058 		j->mounts_head = j->mounts_head->next;
1059 		free(m->data);
1060 		free(m->type);
1061 		free(m->dest);
1062 		free(m->src);
1063 		free(m);
1064 	}
1065 	for (i = 0; i < j->cgroup_count; ++i)
1066 		free(j->cgroups[i]);
1067 bad_mounts:
1068 	if (j->flags.seccomp_filter && j->filter_len > 0) {
1069 		free(j->filter_prog->filter);
1070 		free(j->filter_prog);
1071 	}
1072 bad_filter_prog_instrs:
1073 	if (j->filter_prog)
1074 		free(j->filter_prog);
1075 bad_filters:
1076 	if (j->alt_syscall_table)
1077 		free(j->alt_syscall_table);
1078 bad_syscall_table:
1079 	if (j->chrootdir)
1080 		free(j->chrootdir);
1081 bad_chrootdir:
1082 	if (j->suppl_gid_list)
1083 		free(j->suppl_gid_list);
1084 bad_gid_list:
1085 	if (j->user)
1086 		free(j->user);
1087 clear_pointers:
1088 	j->user = NULL;
1089 	j->suppl_gid_list = NULL;
1090 	j->chrootdir = NULL;
1091 	j->alt_syscall_table = NULL;
1092 	j->cgroup_count = 0;
1093 out:
1094 	return ret;
1095 }
1096 
1097 /*
1098  * setup_mount_destination: Ensures the mount target exists.
1099  * Creates it if needed and possible.
1100  */
1101 static int setup_mount_destination(const char *source, const char *dest,
1102 				   uid_t uid, uid_t gid)
1103 {
1104 	int rc;
1105 	struct stat st_buf;
1106 
1107 	rc = stat(dest, &st_buf);
1108 	if (rc == 0) /* destination exists */
1109 		return 0;
1110 
1111 	/*
1112 	 * Try to create the destination.
1113 	 * Either make a directory or touch a file depending on the source type.
1114 	 * If the source doesn't exist, assume it is a filesystem type such as
1115 	 * "tmpfs" and create a directory to mount it on.
1116 	 */
1117 	rc = stat(source, &st_buf);
1118 	if (rc || S_ISDIR(st_buf.st_mode) || S_ISBLK(st_buf.st_mode)) {
1119 		if (mkdir(dest, 0700))
1120 			return -errno;
1121 	} else {
1122 		int fd = open(dest, O_RDWR | O_CREAT, 0700);
1123 		if (fd < 0)
1124 			return -errno;
1125 		close(fd);
1126 	}
1127 	return chown(dest, uid, gid);
1128 }
1129 
1130 /*
1131  * mount_one: Applies mounts from @m for @j, recursing as needed.
1132  * @j Minijail these mounts are for
1133  * @m Head of list of mounts
1134  *
1135  * Returns 0 for success.
1136  */
1137 static int mount_one(const struct minijail *j, struct mountpoint *m)
1138 {
1139 	int ret;
1140 	char *dest;
1141 	int remount_ro = 0;
1142 
1143 	/* |dest| has a leading "/". */
1144 	if (asprintf(&dest, "%s%s", j->chrootdir, m->dest) < 0)
1145 		return -ENOMEM;
1146 
1147 	if (setup_mount_destination(m->src, dest, j->uid, j->gid))
1148 		pdie("creating mount target '%s' failed", dest);
1149 
1150 	/*
1151 	 * R/O bind mounts have to be remounted since 'bind' and 'ro'
1152 	 * can't both be specified in the original bind mount.
1153 	 * Remount R/O after the initial mount.
1154 	 */
1155 	if ((m->flags & MS_BIND) && (m->flags & MS_RDONLY)) {
1156 		remount_ro = 1;
1157 		m->flags &= ~MS_RDONLY;
1158 	}
1159 
1160 	ret = mount(m->src, dest, m->type, m->flags, m->data);
1161 	if (ret)
1162 		pdie("mount: %s -> %s", m->src, dest);
1163 
1164 	if (remount_ro) {
1165 		m->flags |= MS_RDONLY;
1166 		ret = mount(m->src, dest, NULL,
1167 			    m->flags | MS_REMOUNT, m->data);
1168 		if (ret)
1169 			pdie("bind ro: %s -> %s", m->src, dest);
1170 	}
1171 
1172 	free(dest);
1173 	if (m->next)
1174 		return mount_one(j, m->next);
1175 	return ret;
1176 }
1177 
1178 static int enter_chroot(const struct minijail *j)
1179 {
1180 	int ret;
1181 
1182 	if (j->mounts_head && (ret = mount_one(j, j->mounts_head)))
1183 		return ret;
1184 
1185 	if (chroot(j->chrootdir))
1186 		return -errno;
1187 
1188 	if (chdir("/"))
1189 		return -errno;
1190 
1191 	return 0;
1192 }
1193 
1194 static int enter_pivot_root(const struct minijail *j)
1195 {
1196 	int ret, oldroot, newroot;
1197 
1198 	if (j->mounts_head && (ret = mount_one(j, j->mounts_head)))
1199 		return ret;
1200 
1201 	/*
1202 	 * Keep the fd for both old and new root.
1203 	 * It will be used in fchdir(2) later.
1204 	 */
1205 	oldroot = open("/", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
1206 	if (oldroot < 0)
1207 		pdie("failed to open / for fchdir");
1208 	newroot = open(j->chrootdir, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
1209 	if (newroot < 0)
1210 		pdie("failed to open %s for fchdir", j->chrootdir);
1211 
1212 	/*
1213 	 * To ensure j->chrootdir is the root of a filesystem,
1214 	 * do a self bind mount.
1215 	 */
1216 	if (mount(j->chrootdir, j->chrootdir, "bind", MS_BIND | MS_REC, ""))
1217 		pdie("failed to bind mount '%s'", j->chrootdir);
1218 	if (chdir(j->chrootdir))
1219 		return -errno;
1220 	if (syscall(SYS_pivot_root, ".", "."))
1221 		pdie("pivot_root");
1222 
1223 	/*
1224 	 * Now the old root is mounted on top of the new root. Use fchdir(2) to
1225 	 * change to the old root and unmount it.
1226 	 */
1227 	if (fchdir(oldroot))
1228 		pdie("failed to fchdir to old /");
1229 
1230 	/*
1231 	 * If j->flags.skip_remount_private was enabled for minijail_enter(),
1232 	 * there could be a shared mount point under |oldroot|. In that case,
1233 	 * mounts under this shared mount point will be unmounted below, and
1234 	 * this unmounting will propagate to the original mount namespace
1235 	 * (because the mount point is shared). To prevent this unexpected
1236 	 * unmounting, remove these mounts from their peer groups by recursively
1237 	 * remounting them as MS_PRIVATE.
1238 	 */
1239 	if (mount(NULL, ".", NULL, MS_REC | MS_PRIVATE, NULL))
1240 		pdie("failed to mount(/, private) before umount(/)");
1241 	/* The old root might be busy, so use lazy unmount. */
1242 	if (umount2(".", MNT_DETACH))
1243 		pdie("umount(/)");
1244 	/* Change back to the new root. */
1245 	if (fchdir(newroot))
1246 		return -errno;
1247 	if (close(oldroot))
1248 		return -errno;
1249 	if (close(newroot))
1250 		return -errno;
1251 	if (chroot("/"))
1252 		return -errno;
1253 	/* Set correct CWD for getcwd(3). */
1254 	if (chdir("/"))
1255 		return -errno;
1256 
1257 	return 0;
1258 }
1259 
1260 static int mount_tmp(const struct minijail *j)
1261 {
1262 	const char fmt[] = "size=%zu,mode=1777";
1263 	/* Count for the user storing ULLONG_MAX literally + extra space. */
1264 	char data[sizeof(fmt) + sizeof("18446744073709551615ULL")];
1265 	int ret;
1266 
1267 	ret = snprintf(data, sizeof(data), fmt, j->tmpfs_size);
1268 
1269 	if (ret <= 0)
1270 		pdie("tmpfs size spec error");
1271 	else if ((size_t)ret >= sizeof(data))
1272 		pdie("tmpfs size spec too large");
1273 	return mount("none", "/tmp", "tmpfs", MS_NODEV | MS_NOEXEC | MS_NOSUID,
1274 		     data);
1275 }
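
/*
 * For example (illustrative): with the default minijail_mount_tmp() size this
 * builds the mount data string "size=67108864,mode=1777" for the tmpfs
 * mounted on /tmp.
 */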
1276 
1277 static int remount_proc_readonly(const struct minijail *j)
1278 {
1279 	const char *kProcPath = "/proc";
1280 	const unsigned int kSafeFlags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
1281 	/*
1282 	 * Right now, we're holding a reference to our parent's old mount of
1283 	 * /proc in our namespace, which means using MS_REMOUNT here would
1284 	 * mutate our parent's mount as well, even though we're in a VFS
1285 	 * namespace (!). Instead, remove their mount from our namespace lazily
1286 	 * (MNT_DETACH) and make our own.
1287 	 */
1288 	if (umount2(kProcPath, MNT_DETACH)) {
1289 		/*
1290 		 * If we are in a new user namespace, umount(2) will fail.
1291 		 * See http://man7.org/linux/man-pages/man7/user_namespaces.7.html
1292 		 */
1293 		if (j->flags.userns) {
1294 			info("umount(/proc, MNT_DETACH) failed, "
1295 			     "this is expected when using user namespaces");
1296 		} else {
1297 			return -errno;
1298 		}
1299 	}
1300 	if (mount("proc", kProcPath, "proc", kSafeFlags | MS_RDONLY, ""))
1301 		return -errno;
1302 	return 0;
1303 }
1304 
1305 static void kill_child_and_die(const struct minijail *j, const char *msg)
1306 {
1307 	kill(j->initpid, SIGKILL);
1308 	die("%s", msg);
1309 }
1310 
1311 static void write_pid_file_or_die(const struct minijail *j)
1312 {
1313 	if (write_pid_to_path(j->initpid, j->pid_file_path))
1314 		kill_child_and_die(j, "failed to write pid file");
1315 }
1316 
1317 static void add_to_cgroups_or_die(const struct minijail *j)
1318 {
1319 	size_t i;
1320 
1321 	for (i = 0; i < j->cgroup_count; ++i) {
1322 		if (write_pid_to_path(j->initpid, j->cgroups[i]))
1323 			kill_child_and_die(j, "failed to add to cgroups");
1324 	}
1325 }
1326 
1327 static void write_ugid_maps_or_die(const struct minijail *j)
1328 {
1329 	if (j->uidmap && write_proc_file(j->initpid, j->uidmap, "uid_map") != 0)
1330 		kill_child_and_die(j, "failed to write uid_map");
1331 	if (j->gidmap && j->flags.disable_setgroups) {
1332 		/* Older kernels might not have the /proc/<pid>/setgroups file. */
1333 		int ret = write_proc_file(j->initpid, "deny", "setgroups");
1334 		if (ret != 0) {
1335 			if (ret == -ENOENT) {
1336 				/* See http://man7.org/linux/man-pages/man7/user_namespaces.7.html. */
1337 				warn("could not disable setgroups(2)");
1338 			} else
1339 				kill_child_and_die(j, "failed to disable setgroups(2)");
1340 		}
1341 	}
1342 	if (j->gidmap && write_proc_file(j->initpid, j->gidmap, "gid_map") != 0)
1343 		kill_child_and_die(j, "failed to write gid_map");
1344 }
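
/*
 * For reference (illustrative), assuming write_proc_file() writes
 * /proc/<pid>/<name>, the maps from minijail_uidmap()/minijail_gidmap()
 * amount to the parent writing:
 *
 *   /proc/<initpid>/uid_map    <- "0 1000 1"   (hypothetical map)
 *   /proc/<initpid>/setgroups  <- "deny"       (only if requested)
 *   /proc/<initpid>/gid_map    <- "0 1000 1"
 */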
1345 
1346 static void enter_user_namespace(const struct minijail *j)
1347 {
1348 	if (j->uidmap && setresuid(0, 0, 0))
1349 		pdie("user_namespaces: setresuid(0, 0, 0) failed");
1350 	if (j->gidmap && setresgid(0, 0, 0))
1351 		pdie("user_namespaces: setresgid(0, 0, 0) failed");
1352 }
1353 
1354 static void parent_setup_complete(int *pipe_fds)
1355 {
1356 	close(pipe_fds[0]);
1357 	close(pipe_fds[1]);
1358 }
1359 
1360 /*
1361  * wait_for_parent_setup: Called by the child process to wait for any
1362  * further parent-side setup to complete before continuing.
1363  */
1364 static void wait_for_parent_setup(int *pipe_fds)
1365 {
1366 	char buf;
1367 
1368 	close(pipe_fds[1]);
1369 
1370 	/* Wait for parent to complete setup and close the pipe. */
1371 	if (read(pipe_fds[0], &buf, 1) != 0)
1372 		die("failed to sync with parent");
1373 	close(pipe_fds[0]);
1374 }
1375 
1376 static void drop_ugid(const struct minijail *j)
1377 {
1378 	if (j->flags.inherit_suppl_gids + j->flags.keep_suppl_gids +
1379 	    j->flags.set_suppl_gids > 1) {
1380 		die("can only do one of inherit, keep, or set supplementary "
1381 		    "groups");
1382 	}
1383 
1384 	if (j->flags.inherit_suppl_gids) {
1385 		if (initgroups(j->user, j->usergid))
1386 			pdie("initgroups(%s, %d) failed", j->user, j->usergid);
1387 	} else if (j->flags.set_suppl_gids) {
1388 		if (setgroups(j->suppl_gid_count, j->suppl_gid_list))
1389 			pdie("setgroups(suppl_gids) failed");
1390 	} else if (!j->flags.keep_suppl_gids) {
1391 		/*
1392 		 * Only attempt to clear supplementary groups if we are changing
1393 		 * users or groups.
1394 		 */
1395 		if ((j->flags.uid || j->flags.gid) && setgroups(0, NULL))
1396 			pdie("setgroups(0, NULL) failed");
1397 	}
1398 
1399 	if (j->flags.gid && setresgid(j->gid, j->gid, j->gid))
1400 		pdie("setresgid(%d, %d, %d) failed", j->gid, j->gid, j->gid);
1401 
1402 	if (j->flags.uid && setresuid(j->uid, j->uid, j->uid))
1403 		pdie("setresuid(%d, %d, %d) failed", j->uid, j->uid, j->uid);
1404 }
1405 
1406 /*
1407  * We specifically do not use cap_valid() as that only tells us the last
1408  * valid cap we were *compiled* against (i.e. what the version of kernel
1409  * headers says). If we run on a different kernel version, then it's not
1410  * uncommon for that to be less (if an older kernel) or more (if a newer
1411  * kernel).
1412  * Normally, we suck up the answer via /proc. On Android, not all processes are
1413  * guaranteed to be able to access '/proc/sys/kernel/cap_last_cap' so we
1414  * programmatically find the value by calling prctl(PR_CAPBSET_READ).
1415  */
1416 static unsigned int get_last_valid_cap()
1417 {
1418 	unsigned int last_valid_cap = 0;
1419 	if (is_android()) {
1420 		for (; prctl(PR_CAPBSET_READ, last_valid_cap, 0, 0, 0) >= 0;
1421 		     ++last_valid_cap);
1422 
1423 		/* |last_valid_cap| will be the first failing value. */
1424 		if (last_valid_cap > 0) {
1425 			last_valid_cap--;
1426 		}
1427 	} else {
1428 		const char cap_file[] = "/proc/sys/kernel/cap_last_cap";
1429 		FILE *fp = fopen(cap_file, "re");
		if (!fp)
			pdie("fopen(%s)", cap_file);
1430 		if (fscanf(fp, "%u", &last_valid_cap) != 1)
1431 			pdie("fscanf(%s)", cap_file);
1432 		fclose(fp);
1433 	}
1434 	return last_valid_cap;
1435 }
1436 
1437 static void drop_capbset(uint64_t keep_mask, unsigned int last_valid_cap)
1438 {
1439 	const uint64_t one = 1;
1440 	unsigned int i;
1441 	for (i = 0; i < sizeof(keep_mask) * 8 && i <= last_valid_cap; ++i) {
1442 		if (keep_mask & (one << i))
1443 			continue;
1444 		if (prctl(PR_CAPBSET_DROP, i))
1445 			pdie("could not drop capability from bounding set");
1446 	}
1447 }
1448 
1449 static void drop_caps(const struct minijail *j, unsigned int last_valid_cap)
1450 {
1451 	if (!j->flags.use_caps)
1452 		return;
1453 
1454 	cap_t caps = cap_get_proc();
1455 	cap_value_t flag[1];
1456 	const uint64_t one = 1;
1457 	unsigned int i;
1458 	if (!caps)
1459 		die("can't get process caps");
1460 	if (cap_clear_flag(caps, CAP_INHERITABLE))
1461 		die("can't clear inheritable caps");
1462 	if (cap_clear_flag(caps, CAP_EFFECTIVE))
1463 		die("can't clear effective caps");
1464 	if (cap_clear_flag(caps, CAP_PERMITTED))
1465 		die("can't clear permitted caps");
1466 	for (i = 0; i < sizeof(j->caps) * 8 && i <= last_valid_cap; ++i) {
1467 		/* Keep CAP_SETPCAP for dropping bounding set bits. */
1468 		if (i != CAP_SETPCAP && !(j->caps & (one << i)))
1469 			continue;
1470 		flag[0] = i;
1471 		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_SET))
1472 			die("can't add effective cap");
1473 		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_SET))
1474 			die("can't add permitted cap");
1475 		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_SET))
1476 			die("can't add inheritable cap");
1477 	}
1478 	if (cap_set_proc(caps))
1479 		die("can't apply initial cleaned capset");
1480 
1481 	/*
1482 	 * Instead of dropping bounding set first, do it here in case
1483 	 * the caller had a more permissive bounding set which could
1484 	 * have been used above to raise a capability that wasn't already
1485 	 * present. This requires CAP_SETPCAP, so we raised/kept it above.
1486 	 */
1487 	drop_capbset(j->caps, last_valid_cap);
1488 
1489 	/* If CAP_SETPCAP wasn't specifically requested, now we remove it. */
1490 	if ((j->caps & (one << CAP_SETPCAP)) == 0) {
1491 		flag[0] = CAP_SETPCAP;
1492 		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_CLEAR))
1493 			die("can't clear effective cap");
1494 		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_CLEAR))
1495 			die("can't clear permitted cap");
1496 		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_CLEAR))
1497 			die("can't clear inheritable cap");
1498 	}
1499 
1500 	if (cap_set_proc(caps))
1501 		die("can't apply final cleaned capset");
1502 
1503 	cap_free(caps);
1504 }
1505 
1506 static void set_seccomp_filter(const struct minijail *j)
1507 {
1508 	/*
1509 	 * Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
1510 	 * in the kernel source tree for an explanation of the parameters.
1511 	 */
1512 	if (j->flags.no_new_privs) {
1513 		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
1514 			pdie("prctl(PR_SET_NO_NEW_PRIVS)");
1515 	}
1516 
1517 	/*
1518 	 * Code running with ASan
1519 	 * (https://github.com/google/sanitizers/wiki/AddressSanitizer)
1520 	 * will make system calls not included in the syscall filter policy,
1521 	 * which will likely crash the program. Skip setting seccomp filter in
1522 	 * that case.
1523 	 * 'running_with_asan()' has no inputs and is completely defined at
1524 	 * build time, so this cannot be used by an attacker to skip setting
1525 	 * seccomp filter.
1526 	 */
1527 	if (j->flags.seccomp_filter && running_with_asan()) {
1528 		warn("running with ASan, not setting seccomp filter");
1529 		return;
1530 	}
1531 
1532 	if (j->flags.seccomp_filter) {
1533 		if (j->flags.seccomp_filter_logging) {
1534 			/*
1535 			 * If logging seccomp filter failures,
1536 			 * install the SIGSYS handler first.
1537 			 */
1538 			if (install_sigsys_handler())
1539 				pdie("failed to install SIGSYS handler");
1540 			warn("logging seccomp filter failures");
1541 		} else if (j->flags.seccomp_filter_tsync) {
1542 			/*
1543 			 * If setting thread sync,
1544 			 * reset the SIGSYS signal handler so that
1545 			 * the entire thread group is killed.
1546 			 */
1547 			if (signal(SIGSYS, SIG_DFL) == SIG_ERR)
1548 				pdie("failed to reset SIGSYS disposition");
1549 			info("reset SIGSYS disposition");
1550 		}
1551 	}
1552 
1553 	/*
1554 	 * Install the syscall filter.
1555 	 */
1556 	if (j->flags.seccomp_filter) {
1557 		if (j->flags.seccomp_filter_tsync) {
1558 			if (sys_seccomp(SECCOMP_SET_MODE_FILTER,
1559 					SECCOMP_FILTER_FLAG_TSYNC,
1560 					j->filter_prog)) {
1561 				pdie("seccomp(tsync) failed");
1562 			}
1563 		} else {
1564 			if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
1565 				  j->filter_prog)) {
1566 				pdie("prctl(seccomp_filter) failed");
1567 			}
1568 		}
1569 	}
1570 }
1571 
1572 static void config_net_loopback(void)
1573 {
1574 	static const char ifname[] = "lo";
1575 	int sock;
1576 	struct ifreq ifr;
1577 
1578 	/* Make sure people don't try to add really long names. */
1579 	_Static_assert(sizeof(ifname) <= IFNAMSIZ, "interface name too long");
1580 
1581 	sock = socket(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0);
1582 	if (sock < 0)
1583 		pdie("socket(AF_LOCAL) failed");
1584 
1585 	/*
1586 	 * Do the equiv of `ip link set up lo`.  The kernel will assign
1587 	 * IPv4 (127.0.0.1) & IPv6 (::1) addresses automatically!
1588 	 */
1589 	strcpy(ifr.ifr_name, ifname);
1590 	if (ioctl(sock, SIOCGIFFLAGS, &ifr) < 0)
1591 		pdie("ioctl(SIOCGIFFLAGS) failed");
1592 
1593 	/* The kernel preserves ifr.ifr_name for use. */
1594 	ifr.ifr_flags |= IFF_UP | IFF_RUNNING;
1595 	if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0)
1596 		pdie("ioctl(SIOCSIFFLAGS) failed");
1597 
1598 	close(sock);
1599 }
1600 
1601 void API minijail_enter(const struct minijail *j)
1602 {
1603 	/*
1604 	 * If we're dropping caps, get the last valid cap from /proc now,
1605 	 * since /proc can be unmounted before drop_caps() is called.
1606 	 */
1607 	unsigned int last_valid_cap = 0;
1608 	if (j->flags.capbset_drop || j->flags.use_caps)
1609 		last_valid_cap = get_last_valid_cap();
1610 
1611 	if (j->flags.pids)
1612 		die("tried to enter a pid-namespaced jail;"
1613 		    " try minijail_run()?");
1614 
1615 	if (j->flags.inherit_suppl_gids && !j->user)
1616 		die("cannot inherit supplementary groups without setting a "
1617 		    "username");
1618 
1619 	/*
1620 	 * We can't recover from failures if we've dropped privileges partially,
1621 	 * so we don't even try. If any of our operations fail, we abort() the
1622 	 * entire process.
1623 	 */
1624 	if (j->flags.enter_vfs && setns(j->mountns_fd, CLONE_NEWNS))
1625 		pdie("setns(CLONE_NEWNS) failed");
1626 
1627 	if (j->flags.vfs) {
1628 		if (unshare(CLONE_NEWNS))
1629 			pdie("unshare(CLONE_NEWNS) failed");
1630 		/*
1631 		 * Unless asked not to, remount all filesystems as private.
1632 		 * If they are shared, new bind mounts will creep out of our
1633 		 * namespace.
1634 		 * https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
1635 		 */
1636 		if (!j->flags.skip_remount_private) {
1637 			if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL))
1638 				pdie("mount(NULL, /, NULL, MS_REC | MS_PRIVATE,"
1639 				     " NULL) failed");
1640 		}
1641 	}
1642 
1643 	if (j->flags.ipc && unshare(CLONE_NEWIPC)) {
1644 		pdie("unshare(CLONE_NEWIPC) failed");
1645 	}
1646 
1647 	if (j->flags.enter_net) {
1648 		if (setns(j->netns_fd, CLONE_NEWNET))
1649 			pdie("setns(CLONE_NEWNET) failed");
1650 	} else if (j->flags.net) {
1651 		if (unshare(CLONE_NEWNET))
1652 			pdie("unshare(CLONE_NEWNET) failed");
1653 		config_net_loopback();
1654 	}
1655 
1656 	if (j->flags.ns_cgroups && unshare(CLONE_NEWCGROUP))
1657 		pdie("unshare(CLONE_NEWCGROUP) failed");
1658 
1659 	if (j->flags.new_session_keyring) {
1660 		if (syscall(SYS_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL) < 0)
1661 			pdie("keyctl(KEYCTL_JOIN_SESSION_KEYRING) failed");
1662 	}
1663 
1664 	if (j->flags.chroot && enter_chroot(j))
1665 		pdie("chroot");
1666 
1667 	if (j->flags.pivot_root && enter_pivot_root(j))
1668 		pdie("pivot_root");
1669 
1670 	if (j->flags.mount_tmp && mount_tmp(j))
1671 		pdie("mount_tmp");
1672 
1673 	if (j->flags.remount_proc_ro && remount_proc_readonly(j))
1674 		pdie("remount");
1675 
1676 	/*
1677 	 * If we're only dropping capabilities from the bounding set, but not
1678 	 * from the thread's (permitted|inheritable|effective) sets, do it now.
1679 	 */
1680 	if (j->flags.capbset_drop) {
1681 		drop_capbset(j->cap_bset, last_valid_cap);
1682 	}
1683 
1684 	if (j->flags.use_caps) {
1685 		/*
1686 		 * POSIX capabilities are a bit tricky. If we drop our
1687 		 * capability to change uids, our attempt to use setuid()
1688 		 * below will fail. Hang on to root caps across setuid(), then
1689 		 * lock securebits.
1690 		 */
1691 		if (prctl(PR_SET_KEEPCAPS, 1))
1692 			pdie("prctl(PR_SET_KEEPCAPS) failed");
1693 
1694 		/*
1695 		 * Kernels 4.3+ define a new securebit
1696 		 * (SECURE_NO_CAP_AMBIENT_RAISE), so using the SECURE_ALL_BITS
1697 		 * and SECURE_ALL_LOCKS masks from newer kernel headers will
1698 		 * return EPERM on older kernels. Detect this, and retry with
1699 		 * the right mask for older (2.6.26-4.2) kernels.
1700 		 */
1701 		int securebits_ret = prctl(PR_SET_SECUREBITS,
1702 					   SECURE_ALL_BITS | SECURE_ALL_LOCKS);
1703 		if (securebits_ret < 0) {
1704 			if (errno == EPERM) {
1705 				/* Possibly running on kernel < 4.3. */
1706 				securebits_ret = prctl(
1707 				    PR_SET_SECUREBITS,
1708 				    OLD_SECURE_ALL_BITS | OLD_SECURE_ALL_LOCKS);
1709 			}
1710 		}
1711 		if (securebits_ret < 0)
1712 			pdie("prctl(PR_SET_SECUREBITS) failed");
1713 	}
1714 
1715 	if (j->flags.no_new_privs) {
1716 		/*
1717 		 * If we're setting no_new_privs, we can drop privileges
1718 		 * before setting seccomp filter. This way filter policies
1719 		 * don't need to allow privilege-dropping syscalls.
1720 		 */
1721 		drop_ugid(j);
1722 		drop_caps(j, last_valid_cap);
1723 		set_seccomp_filter(j);
1724 	} else {
1725 		/*
1726 		 * If we're not setting no_new_privs,
1727 		 * we need to set seccomp filter *before* dropping privileges.
1728 		 * WARNING: this means that filter policies *must* allow
1729 		 * setgroups()/setresgid()/setresuid() for dropping root and
1730 		 * capget()/capset()/prctl() for dropping caps.
1731 		 */
1732 		set_seccomp_filter(j);
1733 		drop_ugid(j);
1734 		drop_caps(j, last_valid_cap);
1735 	}
1736 
1737 	/*
1738 	 * Select the specified alternate syscall table.  The table must not
1739 	 * block prctl(2) if we're using seccomp as well.
1740 	 */
1741 	if (j->flags.alt_syscall) {
1742 		if (prctl(PR_ALT_SYSCALL, 1, j->alt_syscall_table))
1743 			pdie("prctl(PR_ALT_SYSCALL) failed");
1744 	}
1745 
1746 	/*
1747 	 * seccomp has to come last since it cuts off all the other
1748 	 * privilege-dropping syscalls :)
1749 	 */
1750 	if (j->flags.seccomp && prctl(PR_SET_SECCOMP, 1)) {
1751 		if ((errno == EINVAL) && seccomp_can_softfail()) {
1752 			warn("seccomp not supported");
1753 			return;
1754 		}
1755 		pdie("prctl(PR_SET_SECCOMP) failed");
1756 	}
1757 }
1758 
1759 /* TODO(wad): will visibility affect this variable? */
1760 static int init_exitstatus = 0;
1761 
1762 void init_term(int __attribute__ ((unused)) sig)
1763 {
1764 	_exit(init_exitstatus);
1765 }
1766 
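/*
 * Act as a minimal init(1) for the new pid namespace: reap children until
 * none remain, remember the wait status of |rootpid|, and exit with that
 * status (or MINIJAIL_ERR_INIT if it did not exit normally).
 */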
1767 void init(pid_t rootpid)
1768 {
1769 	pid_t pid;
1770 	int status;
1771 	/* So that we exit with the right status. */
1772 	signal(SIGTERM, init_term);
1773 	/* TODO(wad): self jail with seccomp filters here. */
1774 	while ((pid = wait(&status)) > 0) {
1775 		/*
1776 		 * This loop will only end when either there are no processes
1777 		 * left inside our pid namespace or we get a signal.
1778 		 */
1779 		if (pid == rootpid)
1780 			init_exitstatus = status;
1781 	}
1782 	if (!WIFEXITED(init_exitstatus))
1783 		_exit(MINIJAIL_ERR_INIT);
1784 	_exit(WEXITSTATUS(init_exitstatus));
1785 }
1786 
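/*
 * Reads a marshalled minijail from |fd| in the [size][minijail] format
 * written by minijail_to_fd() and unmarshals it into |j|.
 */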
1787 int API minijail_from_fd(int fd, struct minijail *j)
1788 {
1789 	size_t sz = 0;
1790 	size_t bytes = read(fd, &sz, sizeof(sz));
1791 	char *buf;
1792 	int r;
1793 	if (sizeof(sz) != bytes)
1794 		return -EINVAL;
1795 	if (sz > USHRT_MAX)	/* arbitrary sanity check */
1796 		return -E2BIG;
1797 	buf = malloc(sz);
1798 	if (!buf)
1799 		return -ENOMEM;
1800 	bytes = read(fd, buf, sz);
1801 	if (bytes != sz) {
1802 		free(buf);
1803 		return -EINVAL;
1804 	}
1805 	r = minijail_unmarshal(j, buf, sz);
1806 	free(buf);
1807 	return r;
1808 }
1809 
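/* Marshals |j| and writes it to |fd| as [size][minijail]. */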
1810 int API minijail_to_fd(struct minijail *j, int fd)
1811 {
1812 	char *buf;
1813 	size_t sz = minijail_size(j);
1814 	ssize_t written;
1815 	int r;
1816 
1817 	if (!sz)
1818 		return -EINVAL;
1819 	buf = malloc(sz);
	if (!buf)
		return -ENOMEM;
1820 	r = minijail_marshal(j, buf, sz);
1821 	if (r) {
1822 		free(buf);
1823 		return r;
1824 	}
1825 	/* Sends [size][minijail]. */
1826 	written = write(fd, &sz, sizeof(sz));
1827 	if (written != sizeof(sz)) {
1828 		free(buf);
1829 		return -EFAULT;
1830 	}
1831 	written = write(fd, buf, sz);
1832 	if (written < 0 || (size_t) written != sz) {
1833 		free(buf);
1834 		return -EFAULT;
1835 	}
1836 	free(buf);
1837 	return 0;
1838 }
1839 
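/*
 * Appends the preload library (PRELOADPATH) to LD_PRELOAD so the jailed
 * child picks it up across execve(2).
 */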
1840 int setup_preload(void)
1841 {
1842 #if defined(__ANDROID__)
1843 	/* Don't use LD_PRELOAD on Brillo. */
1844 	return 0;
1845 #else
1846 	char *oldenv = getenv(kLdPreloadEnvVar) ? : "";
1847 	char *newenv = malloc(strlen(oldenv) + 2 + strlen(PRELOADPATH));
1848 	if (!newenv)
1849 		return -ENOMEM;
1850 
1851 	/* Only insert a separating space if we have something to separate... */
1852 	sprintf(newenv, "%s%s%s", oldenv, strlen(oldenv) ? " " : "",
1853 		PRELOADPATH);
1854 
1855 	/* setenv() makes a copy of the string we give it. */
1856 	setenv(kLdPreloadEnvVar, newenv, 1);
1857 	free(newenv);
1858 	return 0;
1859 #endif
1860 }
1861 
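/*
 * Creates the pipe used to pass the marshalled minijail to the child and
 * publishes the read end's fd number in the environment (kFdEnvVar) so the
 * preloaded library can find it.
 */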
1862 int setup_pipe(int fds[2])
1863 {
1864 	int r = pipe(fds);
1865 	char fd_buf[11];
1866 	if (r)
1867 		return r;
1868 	r = snprintf(fd_buf, sizeof(fd_buf), "%d", fds[0]);
1869 	if (r <= 0)
1870 		return -EINVAL;
1871 	setenv(kFdEnvVar, fd_buf, 1);
1872 	return 0;
1873 }
1874 
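/* Closes the unused end of |fds| and returns the end selected by |index|. */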
1875 int setup_pipe_end(int fds[2], size_t index)
1876 {
1877 	if (index > 1)
1878 		return -1;
1879 
1880 	close(fds[1 - index]);
1881 	return fds[index];
1882 }
1883 
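/*
 * Closes the unused end of |fds| and dup2(2)s the selected end onto |fd|
 * (e.g. onto STDIN_FILENO, STDOUT_FILENO, or STDERR_FILENO).
 */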
1884 int setup_and_dupe_pipe_end(int fds[2], size_t index, int fd)
1885 {
1886 	if (index > 1)
1887 		return -1;
1888 
1889 	close(fds[1 - index]);
1890 	/* dup2(2) the corresponding end of the pipe into |fd|. */
1891 	return dup2(fds[index], fd);
1892 }
1893 
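/*
 * Walks /proc/self/fd and closes every open descriptor that is not listed
 * in |inheritable_fds|, skipping the directory fd used for the walk itself.
 */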
1894 int close_open_fds(int *inheritable_fds, size_t size)
1895 {
1896 	const char *kFdPath = "/proc/self/fd";
1897 
1898 	DIR *d = opendir(kFdPath);
1899 	struct dirent *dir_entry;
1900 
1901 	if (d == NULL)
1902 		return -1;
1903 	int dir_fd = dirfd(d);
1904 	while ((dir_entry = readdir(d)) != NULL) {
1905 		size_t i;
1906 		char *end;
1907 		bool should_close = true;
1908 		const int fd = strtol(dir_entry->d_name, &end, 10);
1909 
1910 		if ((*end) != '\0') {
1911 			continue;
1912 		}
1913 		/*
1914 		 * We might have set up some pipes that we want to share with
1915 		 * the parent process, and should not be closed.
1916 		 */
1917 		for (i = 0; i < size; ++i) {
1918 			if (fd == inheritable_fds[i]) {
1919 				should_close = false;
1920 				break;
1921 			}
1922 		}
1923 		/* Also avoid closing the directory fd. */
1924 		if (should_close && fd != dir_fd)
1925 			close(fd);
1926 	}
1927 	closedir(d);
1928 	return 0;
1929 }
1930 
1931 int minijail_run_internal(struct minijail *j, const char *filename,
1932 			  char *const argv[], pid_t *pchild_pid,
1933 			  int *pstdin_fd, int *pstdout_fd, int *pstderr_fd,
1934 			  int use_preload);
1935 
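/*
 * Public minijail_run*() entry points. They all funnel into
 * minijail_run_internal(), differing only in which child pid / stdio fds
 * they report back and whether the LD_PRELOAD path is used.
 *
 * Illustrative sketch of a caller (values are placeholders, not a
 * recommendation):
 *
 *   struct minijail *j = minijail_new();
 *   minijail_change_uid(j, 1000);
 *   minijail_change_gid(j, 1000);
 *   char *const argv[] = { "/bin/true", NULL };
 *   if (minijail_run(j, argv[0], argv) == 0)
 *           minijail_wait(j);
 *   minijail_destroy(j);
 */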
1936 int API minijail_run(struct minijail *j, const char *filename,
1937 		     char *const argv[])
1938 {
1939 	return minijail_run_internal(j, filename, argv, NULL, NULL, NULL, NULL,
1940 				     true);
1941 }
1942 
1943 int API minijail_run_pid(struct minijail *j, const char *filename,
1944 			 char *const argv[], pid_t *pchild_pid)
1945 {
1946 	return minijail_run_internal(j, filename, argv, pchild_pid,
1947 				     NULL, NULL, NULL, true);
1948 }
1949 
1950 int API minijail_run_pipe(struct minijail *j, const char *filename,
1951 			  char *const argv[], int *pstdin_fd)
1952 {
1953 	return minijail_run_internal(j, filename, argv, NULL, pstdin_fd,
1954 				     NULL, NULL, true);
1955 }
1956 
1957 int API minijail_run_pid_pipes(struct minijail *j, const char *filename,
1958 			       char *const argv[], pid_t *pchild_pid,
1959 			       int *pstdin_fd, int *pstdout_fd, int *pstderr_fd)
1960 {
1961 	return minijail_run_internal(j, filename, argv, pchild_pid,
1962 				     pstdin_fd, pstdout_fd, pstderr_fd, true);
1963 }
1964 
1965 int API minijail_run_no_preload(struct minijail *j, const char *filename,
1966 				char *const argv[])
1967 {
1968 	return minijail_run_internal(j, filename, argv, NULL, NULL, NULL, NULL,
1969 				     false);
1970 }
1971 
1972 int API minijail_run_pid_pipes_no_preload(struct minijail *j,
1973 					  const char *filename,
1974 					  char *const argv[],
1975 					  pid_t *pchild_pid,
1976 					  int *pstdin_fd, int *pstdout_fd,
1977 					  int *pstderr_fd)
1978 {
1979 	return minijail_run_internal(j, filename, argv, pchild_pid,
1980 				     pstdin_fd, pstdout_fd, pstderr_fd, false);
1981 }
1982 
1983 int minijail_run_internal(struct minijail *j, const char *filename,
1984 			  char *const argv[], pid_t *pchild_pid,
1985 			  int *pstdin_fd, int *pstdout_fd, int *pstderr_fd,
1986 			  int use_preload)
1987 {
1988 	char *oldenv, *oldenv_copy = NULL;
1989 	pid_t child_pid;
1990 	int pipe_fds[2];
1991 	int stdin_fds[2];
1992 	int stdout_fds[2];
1993 	int stderr_fds[2];
1994 	int child_sync_pipe_fds[2];
1995 	int sync_child = 0;
1996 	int ret;
1997 	/* We need to remember this across the minijail_preexec() call. */
1998 	int pid_namespace = j->flags.pids;
1999 	int do_init = j->flags.do_init;
2000 
2001 	if (use_preload) {
2002 		oldenv = getenv(kLdPreloadEnvVar);
2003 		if (oldenv) {
2004 			oldenv_copy = strdup(oldenv);
2005 			if (!oldenv_copy)
2006 				return -ENOMEM;
2007 		}
2008 
2009 		if (setup_preload())
2010 			return -EFAULT;
2011 	}
2012 
2013 	if (!use_preload) {
2014 		if (j->flags.use_caps && j->caps != 0)
2015 			die("non-empty capabilities are not supported without "
2016 			    "LD_PRELOAD");
2017 	}
2018 
2019 	/*
2020 	 * Make the process group ID of this process equal to its PID.
2021 	 * In the non-interactive case (e.g. when the parent process is started
2022 	 * from init) this ensures the parent process and the jailed process
2023 	 * can be killed together.
2024 	 * When the parent process is started from the console this ensures
2025 	 * the call to setsid(2) in the jailed process succeeds.
2026 	 *
2027 	 * Don't fail on EPERM, since setpgid(0, 0) can only fail with EPERM
2028 	 * when the process is already a process group leader.
2029 	 */
2030 	if (setpgid(0 /* use calling PID */, 0 /* make PGID = PID */)) {
2031 		if (errno != EPERM) {
2032 			pdie("setpgid(0, 0) failed");
2033 		}
2034 	}
2035 
2036 	if (use_preload) {
2037 		/*
2038 		 * Before we fork(2) and execve(2) the child process, we need
2039 		 * to open a pipe(2) to send the minijail configuration over.
2040 		 */
2041 		if (setup_pipe(pipe_fds))
2042 			return -EFAULT;
2043 	}
2044 
2045 	/*
2046 	 * If we want to write to the child process' standard input,
2047 	 * create the pipe(2) now.
2048 	 */
2049 	if (pstdin_fd) {
2050 		if (pipe(stdin_fds))
2051 			return -EFAULT;
2052 	}
2053 
2054 	/*
2055 	 * If we want to read from the child process' standard output,
2056 	 * create the pipe(2) now.
2057 	 */
2058 	if (pstdout_fd) {
2059 		if (pipe(stdout_fds))
2060 			return -EFAULT;
2061 	}
2062 
2063 	/*
2064 	 * If we want to read from the child process' standard error,
2065 	 * create the pipe(2) now.
2066 	 */
2067 	if (pstderr_fd) {
2068 		if (pipe(stderr_fds))
2069 			return -EFAULT;
2070 	}
2071 
2072 	/*
2073 	 * If we want to set up a new uid/gid map in the user namespace,
2074 	 * or if we need to add the child process to cgroups, create the pipe(2)
2075 	 * to sync between parent and child.
2076 	 */
2077 	if (j->flags.userns || j->flags.cgroups) {
2078 		sync_child = 1;
2079 		if (pipe(child_sync_pipe_fds))
2080 			return -EFAULT;
2081 	}
2082 
2083 	/*
2084 	 * Use sys_clone() if and only if we're creating a pid namespace.
2085 	 *
2086 	 * tl;dr: WARNING: do not mix pid namespaces and multithreading.
2087 	 *
2088 	 * In multithreaded programs, there are a bunch of locks inside libc,
2089 	 * some of which may be held by other threads at the time that we call
2090 	 * minijail_run_pid(). If we call fork(), glibc does its level best to
2091 	 * ensure that we hold all of these locks before it calls clone()
2092 	 * internally and drop them after clone() returns, but when we call
2093 	 * sys_clone(2) directly, all that gets bypassed and we end up with a
2094 	 * child address space where some of libc's important locks are held by
2095 	 * other threads (which did not get cloned, and hence will never release
2096 	 * those locks). This is okay so long as we call exec() immediately
2097 	 * after, but a bunch of seemingly-innocent libc functions like setenv()
2098 	 * take locks.
2099 	 *
2100 	 * Hence, only call sys_clone() if we need to, in order to get at pid
2101 	 * namespacing. If we follow this path, the child's address space might
2102 	 * have broken locks; you may only call functions that do not acquire
2103 	 * any locks.
2104 	 *
2105 	 * Unfortunately, fork() acquires every lock it can get its hands on, as
2106 	 * previously detailed, so this function is highly likely to deadlock
2107 	 * later on (see "deadlock here") if we're multithreaded.
2108 	 *
2109 	 * We might hack around this by having the clone()d child (init of the
2110 	 * pid namespace) return directly, rather than leaving the clone()d
2111 	 * process hanging around to be init for the new namespace (and having
2112 	 * its fork()ed child return in turn), but that process would be
2113 	 * crippled with its libc locks potentially broken. We might try
2114 	 * fork()ing in the parent before we clone() to ensure that we own all
2115 	 * the locks, but then we have to have the forked child hanging around
2116 	 * consuming resources (and possibly having file descriptors / shared
2117 	 * memory regions / etc attached). We'd need to keep the child around to
2118 	 * avoid having its children get reparented to init.
2119 	 *
2120 	 * TODO(ellyjones): figure out if the "forked child hanging around"
2121 	 * problem is fixable or not. It would be nice if we worked in this
2122 	 * case.
2123 	 */
2124 	if (pid_namespace) {
2125 		int clone_flags = CLONE_NEWPID | SIGCHLD;
2126 		if (j->flags.userns)
2127 			clone_flags |= CLONE_NEWUSER;
2128 		child_pid = syscall(SYS_clone, clone_flags, NULL);
2129 	} else {
2130 		child_pid = fork();
2131 	}
2132 
2133 	if (child_pid < 0) {
2134 		if (use_preload) {
2135 			free(oldenv_copy);
2136 		}
2137 		die("failed to fork child");
2138 	}
2139 
2140 	if (child_pid) {
2141 		if (use_preload) {
2142 			/* Restore parent's LD_PRELOAD. */
2143 			if (oldenv_copy) {
2144 				setenv(kLdPreloadEnvVar, oldenv_copy, 1);
2145 				free(oldenv_copy);
2146 			} else {
2147 				unsetenv(kLdPreloadEnvVar);
2148 			}
2149 			unsetenv(kFdEnvVar);
2150 		}
2151 
2152 		j->initpid = child_pid;
2153 
2154 		if (j->flags.pid_file)
2155 			write_pid_file_or_die(j);
2156 
2157 		if (j->flags.cgroups)
2158 			add_to_cgroups_or_die(j);
2159 
2160 		if (j->flags.userns)
2161 			write_ugid_maps_or_die(j);
2162 
2163 		if (sync_child)
2164 			parent_setup_complete(child_sync_pipe_fds);
2165 
2166 		if (use_preload) {
2167 			/* Send marshalled minijail. */
2168 			close(pipe_fds[0]);	/* read endpoint */
2169 			ret = minijail_to_fd(j, pipe_fds[1]);
2170 			close(pipe_fds[1]);	/* write endpoint */
2171 			if (ret) {
2172 				kill(j->initpid, SIGKILL);
2173 				die("failed to send marshalled minijail");
2174 			}
2175 		}
2176 
2177 		if (pchild_pid)
2178 			*pchild_pid = child_pid;
2179 
2180 		/*
2181 		 * If we want to write to the child process' standard input,
2182 		 * set up the write end of the pipe.
2183 		 */
2184 		if (pstdin_fd)
2185 			*pstdin_fd = setup_pipe_end(stdin_fds,
2186 						    1 /* write end */);
2187 
2188 		/*
2189 		 * If we want to read from the child process' standard output,
2190 		 * set up the read end of the pipe.
2191 		 */
2192 		if (pstdout_fd)
2193 			*pstdout_fd = setup_pipe_end(stdout_fds,
2194 						     0 /* read end */);
2195 
2196 		/*
2197 		 * If we want to read from the child process' standard error,
2198 		 * set up the read end of the pipe.
2199 		 */
2200 		if (pstderr_fd)
2201 			*pstderr_fd = setup_pipe_end(stderr_fds,
2202 						     0 /* read end */);
2203 
2204 		return 0;
2205 	}
2206 	/* Child process. */
2207 	free(oldenv_copy);
2208 
2209 	if (j->flags.reset_signal_mask) {
2210 		sigset_t signal_mask;
2211 		if (sigemptyset(&signal_mask) != 0)
2212 			pdie("sigemptyset failed");
2213 		if (sigprocmask(SIG_SETMASK, &signal_mask, NULL) != 0)
2214 			pdie("sigprocmask failed");
2215 	}
2216 
2217 	if (j->flags.close_open_fds) {
2218 		const size_t kMaxInheritableFdsSize = 10;
2219 		int inheritable_fds[kMaxInheritableFdsSize];
2220 		size_t size = 0;
2221 		if (use_preload) {
2222 			inheritable_fds[size++] = pipe_fds[0];
2223 			inheritable_fds[size++] = pipe_fds[1];
2224 		}
2225 		if (sync_child) {
2226 			inheritable_fds[size++] = child_sync_pipe_fds[0];
2227 			inheritable_fds[size++] = child_sync_pipe_fds[1];
2228 		}
2229 		if (pstdin_fd) {
2230 			inheritable_fds[size++] = stdin_fds[0];
2231 			inheritable_fds[size++] = stdin_fds[1];
2232 		}
2233 		if (pstdout_fd) {
2234 			inheritable_fds[size++] = stdout_fds[0];
2235 			inheritable_fds[size++] = stdout_fds[1];
2236 		}
2237 		if (pstderr_fd) {
2238 			inheritable_fds[size++] = stderr_fds[0];
2239 			inheritable_fds[size++] = stderr_fds[1];
2240 		}
2241 
2242 		if (close_open_fds(inheritable_fds, size) < 0)
2243 			die("failed to close open file descriptors");
2244 	}
2245 
2246 	if (sync_child)
2247 		wait_for_parent_setup(child_sync_pipe_fds);
2248 
2249 	if (j->flags.userns)
2250 		enter_user_namespace(j);
2251 
2252 	/*
2253 	 * If we want to write to the jailed process' standard input,
2254 	 * set up the read end of the pipe.
2255 	 */
2256 	if (pstdin_fd) {
2257 		if (setup_and_dupe_pipe_end(stdin_fds, 0 /* read end */,
2258 					    STDIN_FILENO) < 0)
2259 			die("failed to set up stdin pipe");
2260 	}
2261 
2262 	/*
2263 	 * If we want to read from the jailed process' standard output,
2264 	 * set up the write end of the pipe.
2265 	 */
2266 	if (pstdout_fd) {
2267 		if (setup_and_dupe_pipe_end(stdout_fds, 1 /* write end */,
2268 					    STDOUT_FILENO) < 0)
2269 			die("failed to set up stdout pipe");
2270 	}
2271 
2272 	/*
2273 	 * If we want to read from the jailed process' standard error,
2274 	 * set up the write end of the pipe.
2275 	 */
2276 	if (pstderr_fd) {
2277 		if (setup_and_dupe_pipe_end(stderr_fds, 1 /* write end */,
2278 					    STDERR_FILENO) < 0)
2279 			die("failed to set up stderr pipe");
2280 	}
2281 
2282 	/*
2283 	 * If any of stdin, stdout, or stderr are TTYs, create a new session.
2284 	 * This prevents the jailed process from using the TIOCSTI ioctl
2285 	 * to push characters into the input buffer of the parent process's
2286 	 * terminal, thereby escaping the jail.
2287 	 */
2288 	if (isatty(STDIN_FILENO) || isatty(STDOUT_FILENO) ||
2289 	    isatty(STDERR_FILENO)) {
2290 		if (setsid() < 0) {
2291 			pdie("setsid() failed");
2292 		}
2293 	}
2294 
2295 	/* If running an init program, let it decide when/how to mount /proc. */
2296 	if (pid_namespace && !do_init)
2297 		j->flags.remount_proc_ro = 0;
2298 
2299 	if (use_preload) {
2300 		/* Strip out flags that cannot be inherited across execve(2). */
2301 		minijail_preexec(j);
2302 	} else {
2303 		/*
2304 		 * If not using LD_PRELOAD, do all jailing before execve(2).
2305 		 * Note that PID namespaces can only be entered on fork(2),
2306 		 * so that flag is still cleared.
2307 		 */
2308 		j->flags.pids = 0;
2309 	}
2310 	/* Jail this process, then execve(2) the target. */
2311 	minijail_enter(j);
2312 
2313 	if (pid_namespace && do_init) {
2314 		/*
2315 		 * pid namespace: this process will become init inside the new
2316 		 * namespace. We don't want all programs we might exec to have
2317 		 * to know how to be init. Normally (do_init == 1) we fork off
2318 		 * a child to actually run the program. If |do_init == 0|, we
2319 		 * let the program keep pid 1 and be init.
2320 		 *
2321 		 * If we're multithreaded, we'll probably deadlock here. See
2322 		 * WARNING above.
2323 		 */
2324 		child_pid = fork();
2325 		if (child_pid < 0) {
2326 			_exit(child_pid);
2327 		} else if (child_pid > 0) {
2328 			/*
2329 			 * Best effort. Don't bother checking the return value.
2330 			 */
2331 			prctl(PR_SET_NAME, "minijail-init");
2332 			init(child_pid);	/* Never returns. */
2333 		}
2334 	}
2335 
2336 	/*
2337 	 * If we aren't pid-namespaced, or the jailed program asked to be init:
2338 	 *   calling process
2339 	 *   -> execve()-ing process
2340 	 * If we are:
2341 	 *   calling process
2342 	 *   -> init()-ing process
2343 	 *      -> execve()-ing process
2344 	 */
2345 	ret = execve(filename, argv, environ);
2346 	if (ret == -1) {
2347 		pwarn("execve(%s) failed", filename);
2348 	}
2349 	_exit(ret);
2350 }
2351 
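/*
 * Sends SIGTERM to the jailed init process and reaps it, returning its raw
 * wait(2) status (or -errno on failure).
 */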
2352 int API minijail_kill(struct minijail *j)
2353 {
2354 	int st;
2355 	if (kill(j->initpid, SIGTERM))
2356 		return -errno;
2357 	if (waitpid(j->initpid, &st, 0) < 0)
2358 		return -errno;
2359 	return st;
2360 }
2361 
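/*
 * Waits for the jailed process and converts its wait status into an exit
 * code: MINIJAIL_ERR_JAIL if it was killed by SIGSYS (seccomp violation),
 * 128 + signum for other fatal signals, otherwise the child's exit status.
 */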
2362 int API minijail_wait(struct minijail *j)
2363 {
2364 	int st;
2365 	if (waitpid(j->initpid, &st, 0) < 0)
2366 		return -errno;
2367 
2368 	if (!WIFEXITED(st)) {
2369 		int error_status = st;
2370 		if (WIFSIGNALED(st)) {
2371 			int signum = WTERMSIG(st);
2372 			warn("child process %d received signal %d",
2373 			     j->initpid, signum);
2374 			/*
2375 			 * We return MINIJAIL_ERR_JAIL if the process received
2376 			 * SIGSYS, which happens when a syscall is blocked by
2377 			 * seccomp filters.
2378 			 * If not, we do what bash(1) does:
2379 			 * $? = 128 + signum
2380 			 */
2381 			if (signum == SIGSYS) {
2382 				error_status = MINIJAIL_ERR_JAIL;
2383 			} else {
2384 				error_status = 128 + signum;
2385 			}
2386 		}
2387 		return error_status;
2388 	}
2389 
2390 	int exit_status = WEXITSTATUS(st);
2391 	if (exit_status != 0)
2392 		info("child process %d exited with status %d",
2393 		     j->initpid, exit_status);
2394 
2395 	return exit_status;
2396 }
2397 
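/*
 * Frees |j| and everything it owns: the seccomp filter program, the mount
 * list, and all duplicated strings.
 */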
2398 void API minijail_destroy(struct minijail *j)
2399 {
2400 	size_t i;
2401 
2402 	if (j->flags.seccomp_filter && j->filter_prog) {
2403 		free(j->filter_prog->filter);
2404 		free(j->filter_prog);
2405 	}
2406 	while (j->mounts_head) {
2407 		struct mountpoint *m = j->mounts_head;
2408 		j->mounts_head = j->mounts_head->next;
2409 		free(m->data);
2410 		free(m->type);
2411 		free(m->dest);
2412 		free(m->src);
2413 		free(m);
2414 	}
2415 	j->mounts_tail = NULL;
2416 	if (j->user)
2417 		free(j->user);
2418 	if (j->suppl_gid_list)
2419 		free(j->suppl_gid_list);
2420 	if (j->chrootdir)
2421 		free(j->chrootdir);
2422 	if (j->pid_file_path)
2423 		free(j->pid_file_path);
2424 	if (j->uidmap)
2425 		free(j->uidmap);
2426 	if (j->gidmap)
2427 		free(j->gidmap);
2428 	if (j->alt_syscall_table)
2429 		free(j->alt_syscall_table);
2430 	for (i = 0; i < j->cgroup_count; ++i)
2431 		free(j->cgroups[i]);
2432 	free(j);
2433 }
2434