#define	JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats.*
 */
static malloc_mutex_t	ctl_mtx;
static bool		ctl_initialized;
static uint64_t		ctl_epoch;
static ctl_stats_t	ctl_stats;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node)
{

	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index)
{
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

JEMALLOC_INLINE_C const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node)
{

	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}

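/*
 * Both helpers discriminate on the leading "named" flag that every node
 * carries: ctl_named_node() returns NULL when handed an indexed node, and
 * ctl_indexed_node() returns NULL when handed a named one.  The traversal
 * loops in ctl_lookup() and ctl_bymib() below rely on exactly this to decide
 * which kind of child array to descend into.
 */
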
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define	CTL_PROTO(n)							\
static int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen);

#define	INDEX_PROTO(n)							\
static const ctl_named_node_t	*n##_index(const size_t *mib,		\
    size_t miblen, size_t i);
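
/*
 * As a worked example (mechanical expansion of the macro above),
 * CTL_PROTO(version) declares:
 *
 *   static int	version_ctl(const size_t *mib, size_t miblen, void *oldp,
 *       size_t *oldlenp, void *newp, size_t newlen);
 */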

static bool	ctl_arena_init(ctl_arena_stats_t *astats);
static void	ctl_arena_clear(ctl_arena_stats_t *astats);
static void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
    arena_t *arena);
static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
    ctl_arena_stats_t *astats);
static void	ctl_arena_refresh(arena_t *arena, unsigned i);
static bool	ctl_grow(void);
static void	ctl_refresh(void);
static bool	ctl_init(void);
static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp);

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_purge)
static void	arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult)
CTL_PROTO(arena_i_chunk_alloc)
CTL_PROTO(arena_i_chunk_dalloc)
CTL_PROTO(arena_i_chunk_purge)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_hchunk_i_size)
INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_lg_dirty_mult)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_nhchunks)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_huge_allocated)
CTL_PROTO(stats_arenas_i_huge_nmalloc)
CTL_PROTO(stats_arenas_i_huge_ndalloc)
CTL_PROTO(stats_arenas_i_huge_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_lg_dirty_mult)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
CTL_PROTO(stats_arenas_i_metadata_mapped)
CTL_PROTO(stats_arenas_i_metadata_allocated)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)

/******************************************************************************/
/* mallctl tree. */

/* Maximum tree depth. */
#define	CTL_MAX_DEPTH	6

#define	NAME(n)	{true},	n
#define	CHILD(t, c)							\
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
	(ctl_node_t *)c##_node,						\
	NULL
#define	CTL(c)	0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define	INDEX(i)	{false},	i##_index
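
/*
 * As a sketch of how these compose (following the macro definitions above),
 * the table entry {NAME("enabled"), CTL(thread_tcache_enabled)} expands to
 * the initializer
 *
 *   {{true}, "enabled", 0, NULL, thread_tcache_enabled_ctl}
 *
 * i.e. a named terminal node with no children and a ctl callback, whereas
 * CHILD() fills in the child count and children array and leaves the
 * callback NULL.
 */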

static const ctl_named_node_t	thread_tcache_node[] = {
	{NAME("enabled"),	CTL(thread_tcache_enabled)},
	{NAME("flush"),		CTL(thread_tcache_flush)}
};

static const ctl_named_node_t	thread_prof_node[] = {
	{NAME("name"),		CTL(thread_prof_name)},
	{NAME("active"),	CTL(thread_prof_active)}
};

static const ctl_named_node_t	thread_node[] = {
	{NAME("arena"),		CTL(thread_arena)},
	{NAME("allocated"),	CTL(thread_allocated)},
	{NAME("allocatedp"),	CTL(thread_allocatedp)},
	{NAME("deallocated"),	CTL(thread_deallocated)},
	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
	{NAME("tcache"),	CHILD(named, thread_tcache)},
	{NAME("prof"),		CHILD(named, thread_prof)}
};

static const ctl_named_node_t	config_node[] = {
	{NAME("debug"),		CTL(config_debug)},
	{NAME("fill"),		CTL(config_fill)},
	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
	{NAME("munmap"),	CTL(config_munmap)},
	{NAME("prof"),		CTL(config_prof)},
	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
	{NAME("stats"),		CTL(config_stats)},
	{NAME("tcache"),	CTL(config_tcache)},
	{NAME("tls"),		CTL(config_tls)},
	{NAME("utrace"),	CTL(config_utrace)},
	{NAME("valgrind"),	CTL(config_valgrind)},
	{NAME("xmalloc"),	CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"),		CTL(opt_abort)},
	{NAME("dss"),		CTL(opt_dss)},
	{NAME("lg_chunk"),	CTL(opt_lg_chunk)},
	{NAME("narenas"),	CTL(opt_narenas)},
	{NAME("lg_dirty_mult"),	CTL(opt_lg_dirty_mult)},
	{NAME("stats_print"),	CTL(opt_stats_print)},
	{NAME("junk"),		CTL(opt_junk)},
	{NAME("zero"),		CTL(opt_zero)},
	{NAME("quarantine"),	CTL(opt_quarantine)},
	{NAME("redzone"),	CTL(opt_redzone)},
	{NAME("utrace"),	CTL(opt_utrace)},
	{NAME("xmalloc"),	CTL(opt_xmalloc)},
	{NAME("tcache"),	CTL(opt_tcache)},
	{NAME("lg_tcache_max"),	CTL(opt_lg_tcache_max)},
	{NAME("prof"),		CTL(opt_prof)},
	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},
	{NAME("prof_active"),	CTL(opt_prof_active)},
	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"),	CTL(opt_prof_gdump)},
	{NAME("prof_final"),	CTL(opt_prof_final)},
	{NAME("prof_leak"),	CTL(opt_prof_leak)},
	{NAME("prof_accum"),	CTL(opt_prof_accum)}
};

static const ctl_named_node_t	tcache_node[] = {
	{NAME("create"),	CTL(tcache_create)},
	{NAME("flush"),		CTL(tcache_flush)},
	{NAME("destroy"),	CTL(tcache_destroy)}
};

static const ctl_named_node_t chunk_node[] = {
	{NAME("alloc"),		CTL(arena_i_chunk_alloc)},
	{NAME("dalloc"),	CTL(arena_i_chunk_dalloc)},
	{NAME("purge"),		CTL(arena_i_chunk_purge)}
};

static const ctl_named_node_t arena_i_node[] = {
	{NAME("purge"),		CTL(arena_i_purge)},
	{NAME("dss"),		CTL(arena_i_dss)},
	{NAME("lg_dirty_mult"),	CTL(arena_i_lg_dirty_mult)},
	{NAME("chunk"),		CHILD(named, chunk)},
};
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""),		CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"),		CTL(arenas_bin_i_size)},
	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
	{NAME("run_size"),	CTL(arenas_bin_i_run_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""),		CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lrun_i_node[] = {
	{NAME("size"),		CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t super_arenas_lrun_i_node[] = {
	{NAME(""),		CHILD(named, arenas_lrun_i)}
};

static const ctl_indexed_node_t arenas_lrun_node[] = {
	{INDEX(arenas_lrun_i)}
};

static const ctl_named_node_t arenas_hchunk_i_node[] = {
	{NAME("size"),		CTL(arenas_hchunk_i_size)}
};
static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
	{NAME(""),		CHILD(named, arenas_hchunk_i)}
};

static const ctl_indexed_node_t arenas_hchunk_node[] = {
	{INDEX(arenas_hchunk_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"),	CTL(arenas_narenas)},
	{NAME("initialized"),	CTL(arenas_initialized)},
	{NAME("lg_dirty_mult"),	CTL(arenas_lg_dirty_mult)},
	{NAME("quantum"),	CTL(arenas_quantum)},
	{NAME("page"),		CTL(arenas_page)},
	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
	{NAME("nbins"),		CTL(arenas_nbins)},
	{NAME("nhbins"),	CTL(arenas_nhbins)},
	{NAME("bin"),		CHILD(indexed, arenas_bin)},
	{NAME("nlruns"),	CTL(arenas_nlruns)},
	{NAME("lrun"),		CHILD(indexed, arenas_lrun)},
	{NAME("nhchunks"),	CTL(arenas_nhchunks)},
	{NAME("hchunk"),	CHILD(indexed, arenas_hchunk)},
	{NAME("extend"),	CTL(arenas_extend)}
};

static const ctl_named_node_t	prof_node[] = {
	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
	{NAME("active"),	CTL(prof_active)},
	{NAME("dump"),		CTL(prof_dump)},
	{NAME("gdump"),		CTL(prof_gdump)},
	{NAME("reset"),		CTL(prof_reset)},
	{NAME("interval"),	CTL(prof_interval)},
	{NAME("lg_sample"),	CTL(lg_prof_sample)}
};

static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
	{NAME("mapped"),	CTL(stats_arenas_i_metadata_mapped)},
	{NAME("allocated"),	CTL(stats_arenas_i_metadata_allocated)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_huge_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_huge_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_huge_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_huge_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_huge_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nruns"),		CTL(stats_arenas_i_bins_j_nruns)},
	{NAME("nreruns"),	CTL(stats_arenas_i_bins_j_nreruns)},
	{NAME("curruns"),	CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_lruns_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_lruns_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_lruns_j_nrequests)},
	{NAME("curruns"),	CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_lruns_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
	{INDEX(stats_arenas_i_lruns_j)}
};

static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_hchunks_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_hchunks_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_hchunks_j_nrequests)},
	{NAME("curhchunks"),	CTL(stats_arenas_i_hchunks_j_curhchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_hchunks_j)}
};

static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
	{INDEX(stats_arenas_i_hchunks_j)}
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
	{NAME("dss"),		CTL(stats_arenas_i_dss)},
	{NAME("lg_dirty_mult"),	CTL(stats_arenas_i_lg_dirty_mult)},
	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
	{NAME("npurge"),	CTL(stats_arenas_i_npurge)},
	{NAME("nmadvise"),	CTL(stats_arenas_i_nmadvise)},
	{NAME("purged"),	CTL(stats_arenas_i_purged)},
	{NAME("metadata"),	CHILD(named, stats_arenas_i_metadata)},
	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
	{NAME("huge"),		CHILD(named, stats_arenas_i_huge)},
	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lruns"),		CHILD(indexed, stats_arenas_i_lruns)},
	{NAME("hchunks"),	CHILD(indexed, stats_arenas_i_hchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_node[] = {
	{NAME("cactive"),	CTL(stats_cactive)},
	{NAME("allocated"),	CTL(stats_allocated)},
	{NAME("active"),	CTL(stats_active)},
	{NAME("metadata"),	CTL(stats_metadata)},
	{NAME("resident"),	CTL(stats_resident)},
	{NAME("mapped"),	CTL(stats_mapped)},
	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
};

static const ctl_named_node_t	root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),		CTL(epoch)},
	{NAME("thread"),	CHILD(named, thread)},
	{NAME("config"),	CHILD(named, config)},
	{NAME("opt"),		CHILD(named, opt)},
	{NAME("tcache"),	CHILD(named, tcache)},
	{NAME("arena"),		CHILD(indexed, arena)},
	{NAME("arenas"),	CHILD(named, arenas)},
	{NAME("prof"),		CHILD(named, prof)},
	{NAME("stats"),		CHILD(named, stats)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""),		CHILD(named, root)}
};
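
/*
 * Example of how a name maps onto this tree (indices derived from the tables
 * above): "thread.tcache.enabled" resolves to the MIB {2, 5, 0}, since
 * "thread" is child 2 of root_node, "tcache" is child 5 of thread_node, and
 * "enabled" is child 0 of thread_tcache_node.
 */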

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX

/******************************************************************************/

static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{

	if (astats->lstats == NULL) {
		astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (astats->lstats == NULL)
			return (true);
	}

	if (astats->hstats == NULL) {
		astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (astats->hstats == NULL)
			return (true);
	}

	return (false);
}

static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{

	astats->dss = dss_prec_names[dss_prec_limit];
	astats->lg_dirty_mult = -1;
	astats->pactive = 0;
	astats->pdirty = 0;
	if (config_stats) {
		memset(&astats->astats, 0, sizeof(arena_stats_t));
		astats->allocated_small = 0;
		astats->nmalloc_small = 0;
		astats->ndalloc_small = 0;
		astats->nrequests_small = 0;
		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
		memset(astats->lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		memset(astats->hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
	}
}

static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
	unsigned i;

	arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
	    &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
	    cstats->lstats, cstats->hstats);

	for (i = 0; i < NBINS; i++) {
		cstats->allocated_small += cstats->bstats[i].curregs *
		    index2size(i);
		cstats->nmalloc_small += cstats->bstats[i].nmalloc;
		cstats->ndalloc_small += cstats->bstats[i].ndalloc;
		cstats->nrequests_small += cstats->bstats[i].nrequests;
	}
}

static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
	unsigned i;

	sstats->pactive += astats->pactive;
	sstats->pdirty += astats->pdirty;

	sstats->astats.mapped += astats->astats.mapped;
	sstats->astats.npurge += astats->astats.npurge;
	sstats->astats.nmadvise += astats->astats.nmadvise;
	sstats->astats.purged += astats->astats.purged;

	sstats->astats.metadata_mapped += astats->astats.metadata_mapped;
	sstats->astats.metadata_allocated += astats->astats.metadata_allocated;

	sstats->allocated_small += astats->allocated_small;
	sstats->nmalloc_small += astats->nmalloc_small;
	sstats->ndalloc_small += astats->ndalloc_small;
	sstats->nrequests_small += astats->nrequests_small;

	sstats->astats.allocated_large += astats->astats.allocated_large;
	sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
	sstats->astats.nrequests_large += astats->astats.nrequests_large;

	sstats->astats.allocated_huge += astats->astats.allocated_huge;
	sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
	sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;

	for (i = 0; i < NBINS; i++) {
		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
		sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
		sstats->bstats[i].curregs += astats->bstats[i].curregs;
		if (config_tcache) {
			sstats->bstats[i].nfills += astats->bstats[i].nfills;
			sstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
		}
		sstats->bstats[i].nruns += astats->bstats[i].nruns;
		sstats->bstats[i].reruns += astats->bstats[i].reruns;
		sstats->bstats[i].curruns += astats->bstats[i].curruns;
	}

	for (i = 0; i < nlclasses; i++) {
		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
		sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
		sstats->lstats[i].curruns += astats->lstats[i].curruns;
	}

	for (i = 0; i < nhclasses; i++) {
		sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
		sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
		sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks;
	}
}

static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
	ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];

	ctl_arena_clear(astats);

	sstats->nthreads += astats->nthreads;
	if (config_stats) {
		ctl_arena_stats_amerge(astats, arena);
		/* Merge into sum stats as well. */
		ctl_arena_stats_smerge(sstats, astats);
	} else {
		astats->pactive += arena->nactive;
		astats->pdirty += arena->ndirty;
		/* Merge into sum stats as well. */
		sstats->pactive += arena->nactive;
		sstats->pdirty += arena->ndirty;
	}
}

static bool
ctl_grow(void)
{
	ctl_arena_stats_t *astats;

	/* Initialize new arena. */
	if (arena_init(ctl_stats.narenas) == NULL)
		return (true);

	/* Allocate extended arena stats. */
	astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
	    sizeof(ctl_arena_stats_t));
	if (astats == NULL)
		return (true);

	/* Initialize the new astats element. */
	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
	    sizeof(ctl_arena_stats_t));
	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
		a0dalloc(astats);
		return (true);
	}
	/* Swap merged stats to their new location. */
	{
		ctl_arena_stats_t tstats;
		memcpy(&tstats, &astats[ctl_stats.narenas],
		    sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas],
		    &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
		    sizeof(ctl_arena_stats_t));
	}
	a0dalloc(ctl_stats.arenas);
	ctl_stats.arenas = astats;
	ctl_stats.narenas++;

	return (false);
}
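
/*
 * A sketch of the ctl_stats.arenas layout that ctl_grow() maintains, with
 * narenas == N:
 *
 *   [0] .. [N-1]	per-arena stats
 *   [N]		stats summed across all arenas
 *
 * The function allocates N+2 elements, initializes the fresh slot, and swaps
 * the summary element into the last position before publishing the new array.
 */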

static void
ctl_refresh(void)
{
	tsd_t *tsd;
	unsigned i;
	bool refreshed;
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

	/*
	 * Clear sum stats, since ctl_arena_refresh() merges into them.
	 */
	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

	tsd = tsd_fetch();
	for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
		tarenas[i] = arena_get(tsd, i, false, false);
		if (tarenas[i] == NULL && !refreshed) {
			tarenas[i] = arena_get(tsd, i, false, true);
			refreshed = true;
		}
	}

	for (i = 0; i < ctl_stats.narenas; i++) {
		if (tarenas[i] != NULL)
			ctl_stats.arenas[i].nthreads = arena_nbound(i);
		else
			ctl_stats.arenas[i].nthreads = 0;
	}

	for (i = 0; i < ctl_stats.narenas; i++) {
		bool initialized = (tarenas[i] != NULL);

		ctl_stats.arenas[i].initialized = initialized;
		if (initialized)
			ctl_arena_refresh(tarenas[i], i);
	}

	if (config_stats) {
		size_t base_allocated, base_resident, base_mapped;
		base_stats_get(&base_allocated, &base_resident, &base_mapped);
		ctl_stats.allocated =
		    ctl_stats.arenas[ctl_stats.narenas].allocated_small +
		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
		ctl_stats.active =
		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
		ctl_stats.metadata = base_allocated +
		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
		    ctl_stats.arenas[ctl_stats.narenas].astats
		    .metadata_allocated;
		ctl_stats.resident = base_resident +
		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
		    ((ctl_stats.arenas[ctl_stats.narenas].pactive +
		    ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
		ctl_stats.mapped = base_mapped +
		    ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
	}

	ctl_epoch++;
}

static bool
ctl_init(void)
{
	bool ret;

	malloc_mutex_lock(&ctl_mtx);
	if (!ctl_initialized) {
		/*
		 * Allocate space for one extra arena stats element, which
		 * contains summed stats across all arenas.
		 */
		ctl_stats.narenas = narenas_total_get();
		ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
		if (ctl_stats.arenas == NULL) {
			ret = true;
			goto label_return;
		}
		memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
		    sizeof(ctl_arena_stats_t));

		/*
		 * Initialize all stats structures, regardless of whether they
		 * ever get used.  Lazy initialization would allow errors to
		 * cause inconsistent state to be viewable by the application.
		 */
		if (config_stats) {
			unsigned i;
			for (i = 0; i <= ctl_stats.narenas; i++) {
				if (ctl_arena_init(&ctl_stats.arenas[i])) {
					unsigned j;
					for (j = 0; j < i; j++) {
						a0dalloc(
						    ctl_stats.arenas[j].lstats);
						a0dalloc(
						    ctl_stats.arenas[j].hstats);
					}
					a0dalloc(ctl_stats.arenas);
					ctl_stats.arenas = NULL;
					ret = true;
					goto label_return;
				}
			}
		}
		ctl_stats.arenas[ctl_stats.narenas].initialized = true;

		ctl_epoch = 0;
		ctl_refresh();
		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
    size_t *depthp)
{
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL)
						nodesp[i] =
						    (const ctl_node_t *)node;
					mibp[i] = j;
					break;
				}
			}
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL)
				nodesp[i] = (const ctl_node_t *)node;
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return (ret);
}
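
/*
 * Note that *depthp is in/out: on entry it bounds how many elements of
 * nodesp/mibp may be written, and on success it is updated to the depth
 * actually consumed, which is how ctl_nametomib() reports the resulting MIB
 * length back to its caller.
 */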

int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	size_t depth;
	ctl_node_t const *nodes[CTL_MAX_DEPTH];
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (!ctl_initialized && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(name, nodes, mib, &depth);
	if (ret != 0)
		goto label_return;

	node = ctl_named_node(nodes[depth-1]);
	if (node != NULL && node->ctl)
		ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
	else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return (ret);
}
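
/*
 * Typical call (a sketch of the path taken on behalf of mallctl()):
 *
 *   uint64_t epoch = 1;
 *   size_t sz = sizeof(epoch);
 *   ctl_byname("epoch", &epoch, &sz, &epoch, sz);
 *
 * which, per epoch_ctl() below, refreshes the cached statistics and reads
 * back the new epoch value.
 */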

int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;

	if (!ctl_initialized && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookup(name, NULL, mibp, miblenp);
label_return:
	return (ret);
}

int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const ctl_named_node_t *node;
	size_t i;

	if (!ctl_initialized && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}

	/* Call the ctl function. */
	if (node && node->ctl)
		ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
	else {
		/* Partial MIB. */
		ret = ENOENT;
	}

label_return:
	return (ret);
}
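
/*
 * The intended pattern for repeated queries (a sketch mirroring the public
 * mallctlnametomib()/mallctlbymib() interface) is to translate once and then
 * patch the index component of the MIB:
 *
 *   size_t mib[CTL_MAX_DEPTH], miblen = CTL_MAX_DEPTH;
 *   unsigned nthreads;
 *   size_t sz = sizeof(nthreads);
 *   ctl_nametomib("stats.arenas.0.nthreads", mib, &miblen);
 *   mib[2] = i;			(* Re-point at arena i. *)
 *   ctl_bymib(mib, miblen, &nthreads, &sz, NULL, 0);
 */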

bool
ctl_boot(void)
{

	if (malloc_mutex_init(&ctl_mtx))
		return (true);

	ctl_initialized = false;

	return (false);
}

void
ctl_prefork(void)
{

	malloc_mutex_prefork(&ctl_mtx);
}

void
ctl_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&ctl_mtx);
}

void
ctl_postfork_child(void)
{

	malloc_mutex_postfork_child(&ctl_mtx);
}

/******************************************************************************/
/* *_ctl() functions. */

#define	READONLY()	do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define	WRITEONLY()	do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define	READ_XOR_WRITE()	do {					\
	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
	    newlen != 0)) {						\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define	READ(v, t)	do {						\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t	copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&(v), copylen);		\
			ret = EINVAL;					\
			goto label_return;				\
		} else							\
			*(t *)oldp = (v);				\
	}								\
} while (0)

#define	WRITE(v, t)	do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		(v) = *(t *)newp;					\
	}								\
} while (0)
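
/*
 * Together these macros implement the sysctl(3)-style protocol shared by all
 * *_ctl() functions: a non-NULL oldp/oldlenp pair requests a read of the
 * current value, and a non-NULL newp with newlen == sizeof(type) requests a
 * write.  On a size mismatch READ() copies out as much as fits and fails with
 * EINVAL, while WRITE() rejects the write outright.
 */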

/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
#define	CTL_RO_CLGEN(c, l, n, v, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	if (l)								\
		malloc_mutex_lock(&ctl_mtx);				\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	if (l)								\
		malloc_mutex_unlock(&ctl_mtx);				\
	return (ret);							\
}

#define	CTL_RO_CGEN(c, n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

#define	CTL_RO_GEN(n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define	CTL_RO_NL_CGEN(c, n, v, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

#define	CTL_RO_NL_GEN(n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

#define	CTL_TSD_RO_NL_CGEN(c, n, m, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
	tsd_t *tsd;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	READONLY();							\
	tsd = tsd_fetch();						\
	oldval = (m(tsd));						\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

#define	CTL_RO_BOOL_CONFIG_GEN(n)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	bool oldval;							\
									\
	READONLY();							\
	oldval = n;							\
	READ(oldval, bool);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}
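
/*
 * For example, CTL_RO_BOOL_CONFIG_GEN(config_debug) below defines
 * config_debug_ctl() as a read-only, lock-free accessor that copies the
 * config_debug constant out through oldp as a bool.
 */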

/******************************************************************************/

CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(&ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL)
		ctl_refresh();
	READ(ctl_epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
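
/*
 * Writing any uint64_t to "epoch" (the value itself is ignored) triggers
 * ctl_refresh(), so statistics consumers conventionally bump the epoch before
 * sampling the "stats.*" namespace.
 */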

/******************************************************************************/

CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)

/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
    opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)

/******************************************************************************/

static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;
	arena_t *oldarena;
	unsigned newind, oldind;

	tsd = tsd_fetch();
	oldarena = arena_choose(tsd, NULL);
	if (oldarena == NULL)
		return (EAGAIN);

	malloc_mutex_lock(&ctl_mtx);
	newind = oldind = oldarena->ind;
	WRITE(newind, unsigned);
	READ(oldind, unsigned);
	if (newind != oldind) {
		arena_t *newarena;

		if (newind >= ctl_stats.narenas) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		/* Initialize arena if necessary. */
		newarena = arena_get(tsd, newind, true, true);
		if (newarena == NULL) {
			ret = EAGAIN;
			goto label_return;
		}
		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldind, newind);
		if (config_tcache) {
			tcache_t *tcache = tsd_tcache_get(tsd);
			if (tcache != NULL) {
				tcache_arena_reassociate(tcache, oldarena,
				    newarena);
			}
		}
	}

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
    uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
    tsd_thread_deallocatedp_get, uint64_t *)

static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (!config_tcache)
		return (ENOENT);

	oldval = tcache_enabled_get();
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		tcache_enabled_set(*(bool *)newp);
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}

static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

	if (!config_tcache)
		return (ENOENT);

	READONLY();
	WRITEONLY();

	tcache_flush();

	ret = 0;
label_return:
	return (ret);
}

static int
thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

	if (!config_prof)
		return (ENOENT);

	READ_XOR_WRITE();

	if (newp != NULL) {
		tsd_t *tsd;

		if (newlen != sizeof(const char *)) {
			ret = EINVAL;
			goto label_return;
		}

		tsd = tsd_fetch();

		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
		    0)
			goto label_return;
	} else {
		const char *oldname = prof_thread_name_get();
		READ(oldname, const char *);
	}

	ret = 0;
label_return:
	return (ret);
}

static int
thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (!config_prof)
		return (ENOENT);

	oldval = prof_thread_active_get();
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		if (prof_thread_active_set(*(bool *)newp)) {
			ret = EAGAIN;
			goto label_return;
		}
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}

/******************************************************************************/

static int
tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;
	unsigned tcache_ind;

	if (!config_tcache)
		return (ENOENT);

	tsd = tsd_fetch();

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (tcaches_create(tsd, &tcache_ind)) {
		ret = EFAULT;
		goto label_return;
	}
	READ(tcache_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;
	unsigned tcache_ind;

	if (!config_tcache)
		return (ENOENT);

	tsd = tsd_fetch();

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_flush(tsd, tcache_ind);

	ret = 0;
label_return:
	return (ret);
}

static int
tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;
	unsigned tcache_ind;

	if (!config_tcache)
		return (ENOENT);

	tsd = tsd_fetch();

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_destroy(tsd, tcache_ind);

	ret = 0;
label_return:
	return (ret);
}
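
/*
 * A sketch of the intended lifecycle for explicit caches: "tcache.create"
 * reads back an index that callers then pass to allocation functions via
 * MALLOCX_TCACHE(tcache_ind); "tcache.flush" empties that cache, and
 * "tcache.destroy" flushes it and makes the index available for reuse.
 */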

/******************************************************************************/

/* ctl_mtx must be held during execution of this function. */
static void
arena_purge(unsigned arena_ind)
{
	tsd_t *tsd;
	unsigned i;
	bool refreshed;
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

	tsd = tsd_fetch();
	for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
		tarenas[i] = arena_get(tsd, i, false, false);
		if (tarenas[i] == NULL && !refreshed) {
			tarenas[i] = arena_get(tsd, i, false, true);
			refreshed = true;
		}
	}

	if (arena_ind == ctl_stats.narenas) {
		unsigned i;
		for (i = 0; i < ctl_stats.narenas; i++) {
			if (tarenas[i] != NULL)
				arena_purge_all(tarenas[i]);
		}
	} else {
		assert(arena_ind < ctl_stats.narenas);
		if (tarenas[arena_ind] != NULL)
			arena_purge_all(tarenas[arena_ind]);
	}
}

static int
arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;

	READONLY();
	WRITEONLY();
	malloc_mutex_lock(&ctl_mtx);
	arena_purge(mib[1]);
	malloc_mutex_unlock(&ctl_mtx);

	ret = 0;
label_return:
	return (ret);
}

static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const char *dss = NULL;
	unsigned arena_ind = mib[1];
	dss_prec_t dss_prec_old = dss_prec_limit;
	dss_prec_t dss_prec = dss_prec_limit;

	malloc_mutex_lock(&ctl_mtx);
	WRITE(dss, const char *);
	if (dss != NULL) {
		int i;
		bool match = false;

		for (i = 0; i < dss_prec_limit; i++) {
			if (strcmp(dss_prec_names[i], dss) == 0) {
				dss_prec = i;
				match = true;
				break;
			}
		}

		if (!match) {
			ret = EINVAL;
			goto label_return;
		}
	}

	if (arena_ind < ctl_stats.narenas) {
		arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true);
		if (arena == NULL || (dss_prec != dss_prec_limit &&
		    arena_dss_prec_set(arena, dss_prec))) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = arena_dss_prec_get(arena);
	} else {
		if (dss_prec != dss_prec_limit &&
		    chunk_dss_prec_set(dss_prec)) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = chunk_dss_prec_get();
	}

	dss = dss_prec_names[dss_prec_old];
	READ(dss, const char *);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned arena_ind = mib[1];
	arena_t *arena;

	arena = arena_get(tsd_fetch(), arena_ind, false, true);
	if (arena == NULL) {
		ret = EFAULT;
		goto label_return;
	}

	if (oldp != NULL && oldlenp != NULL) {
		ssize_t oldval = arena_lg_dirty_mult_get(arena);
		READ(oldval, ssize_t);
	}
	if (newp != NULL) {
		if (newlen != sizeof(ssize_t)) {
			ret = EINVAL;
			goto label_return;
		}
		if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) {
			ret = EFAULT;
			goto label_return;
		}
	}

	ret = 0;
label_return:
	return (ret);
}

#define	CHUNK_FUNC(n)							\
static int								\
arena_i_chunk_##n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen)				\
{									\
									\
	int ret;							\
	unsigned arena_ind = mib[1];					\
	arena_t *arena;							\
									\
	malloc_mutex_lock(&ctl_mtx);					\
	if (arena_ind < narenas_total_get() && (arena =			\
	    arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {	\
		malloc_mutex_lock(&arena->lock);			\
		READ(arena->chunk_##n, chunk_##n##_t *);		\
		WRITE(arena->chunk_##n, chunk_##n##_t *);		\
	} else {							\
		ret = EFAULT;						\
		goto label_outer_return;				\
	}								\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(&arena->lock);				\
label_outer_return:							\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}
CHUNK_FUNC(alloc)
CHUNK_FUNC(dalloc)
CHUNK_FUNC(purge)
#undef CHUNK_FUNC
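
/*
 * The above generates arena_i_chunk_{alloc,dalloc,purge}_ctl(), each of which
 * reads and/or replaces one of an arena's chunk management hooks under
 * arena->lock; e.g. writing a chunk_alloc_t * to "arena.<i>.chunk.alloc"
 * installs a custom chunk allocator for that arena.
 */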

static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
	const ctl_named_node_t *ret;

	malloc_mutex_lock(&ctl_mtx);
	if (i > ctl_stats.narenas) {
		ret = NULL;
		goto label_return;
	}

	ret = super_arena_i_node;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

/******************************************************************************/

static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned narenas;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (*oldlenp != sizeof(unsigned)) {
		ret = EINVAL;
		goto label_return;
	}
	narenas = ctl_stats.narenas;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
1754 
1755 static int
arenas_initialized_ctl(const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)1756 arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
1757     size_t *oldlenp, void *newp, size_t newlen)
1758 {
1759 	int ret;
1760 	unsigned nread, i;
1761 
1762 	malloc_mutex_lock(&ctl_mtx);
1763 	READONLY();
1764 	if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
1765 		ret = EINVAL;
1766 		nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
1767 		    ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
1768 	} else {
1769 		ret = 0;
1770 		nread = ctl_stats.narenas;
1771 	}
1772 
1773 	for (i = 0; i < nread; i++)
1774 		((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
1775 
1776 label_return:
1777 	malloc_mutex_unlock(&ctl_mtx);
1778 	return (ret);
1779 }
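
/*
 * Example (editor's sketch, not from the original source): the handler
 * above fills a caller-supplied bool array, truncating (and returning
 * EINVAL) when *oldlenp is too small.  Assuming narenas was fetched via
 * "arenas.narenas" and fits the buffer:
 *
 *	bool init[64];
 *	size_t sz = narenas * sizeof(bool);
 *	mallctl("arenas.initialized", init, &sz, NULL, 0);
 */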
1780 
1781 static int
1782 arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
1783     size_t *oldlenp, void *newp, size_t newlen)
1784 {
1785 	int ret;
1786 
1787 	if (oldp != NULL && oldlenp != NULL) {
1788 		ssize_t oldval = arena_lg_dirty_mult_default_get();
1789 		READ(oldval, ssize_t);
1790 	}
1791 	if (newp != NULL) {
1792 		if (newlen != sizeof(ssize_t)) {
1793 			ret = EINVAL;
1794 			goto label_return;
1795 		}
1796 		if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) {
1797 			ret = EFAULT;
1798 			goto label_return;
1799 		}
1800 	}
1801 
1802 	ret = 0;
1803 label_return:
1804 	return (ret);
1805 }
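
/*
 * Example (editor's sketch, not from the original source): unlike the
 * per-arena node, "arenas.lg_dirty_mult" adjusts the default inherited by
 * subsequently created arenas:
 *
 *	ssize_t lg = 5;
 *	mallctl("arenas.lg_dirty_mult", NULL, NULL, &lg, sizeof(lg));
 */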
1806 
1807 CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
1808 CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
1809 CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
1810 CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
1811 CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
1812 CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
1813 CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
1814 CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
1815 static const ctl_named_node_t *
1816 arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
1817 {
1818 
1819 	if (i > NBINS)
1820 		return (NULL);
1821 	return (super_arenas_bin_i_node);
1822 }
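
/*
 * Example (editor's sketch, not from the original source): indexed nodes
 * such as "arenas.bin.<i>" are typically driven by MIB, patching the index
 * component in place (bin 3 illustrative):
 *
 *	size_t mib[4], miblen = 4, reg_size, sz = sizeof(reg_size);
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	mib[2] = 3;
 *	mallctlbymib(mib, miblen, &reg_size, &sz, NULL, 0);
 */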
1823 
1824 CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
1825 CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
1826 static const ctl_named_node_t *
1827 arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
1828 {
1829 
1830 	if (i > nlclasses)
1831 		return (NULL);
1832 	return (super_arenas_lrun_i_node);
1833 }
1834 
1835 CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
1836 CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
1837 static const ctl_named_node_t *
1838 arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
1839 {
1840 
1841 	if (i > nhclasses)
1842 		return (NULL);
1843 	return (super_arenas_hchunk_i_node);
1844 }
1845 
1846 static int
1847 arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1848     void *newp, size_t newlen)
1849 {
1850 	int ret;
1851 	unsigned narenas;
1852 
1853 	malloc_mutex_lock(&ctl_mtx);
1854 	READONLY();
1855 	if (ctl_grow()) {
1856 		ret = EAGAIN;
1857 		goto label_return;
1858 	}
1859 	narenas = ctl_stats.narenas - 1;
1860 	READ(narenas, unsigned);
1861 
1862 	ret = 0;
1863 label_return:
1864 	malloc_mutex_unlock(&ctl_mtx);
1865 	return (ret);
1866 }
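
/*
 * Example (editor's sketch, not from the original source): "arenas.extend"
 * creates one arena and reports its index through the old-value path:
 *
 *	unsigned arena_ind;
 *	size_t sz = sizeof(arena_ind);
 *	mallctl("arenas.extend", &arena_ind, &sz, NULL, 0);
 */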
1867 
1868 /******************************************************************************/
1869 
1870 static int
1871 prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
1872     size_t *oldlenp, void *newp, size_t newlen)
1873 {
1874 	int ret;
1875 	bool oldval;
1876 
1877 	if (!config_prof)
1878 		return (ENOENT);
1879 
1880 	if (newp != NULL) {
1881 		if (newlen != sizeof(bool)) {
1882 			ret = EINVAL;
1883 			goto label_return;
1884 		}
1885 		oldval = prof_thread_active_init_set(*(bool *)newp);
1886 	} else
1887 		oldval = prof_thread_active_init_get();
1888 	READ(oldval, bool);
1889 
1890 	ret = 0;
1891 label_return:
1892 	return (ret);
1893 }
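
/*
 * Example (editor's sketch, not from the original source): the prof
 * booleans in this block all share the same shape, returning the previous
 * value while optionally installing a new one:
 *
 *	bool oldval, newval = true;
 *	size_t sz = sizeof(oldval);
 *	mallctl("prof.thread_active_init", &oldval, &sz, &newval,
 *	    sizeof(newval));
 */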
1894 
1895 static int
1896 prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1897     void *newp, size_t newlen)
1898 {
1899 	int ret;
1900 	bool oldval;
1901 
1902 	if (!config_prof)
1903 		return (ENOENT);
1904 
1905 	if (newp != NULL) {
1906 		if (newlen != sizeof(bool)) {
1907 			ret = EINVAL;
1908 			goto label_return;
1909 		}
1910 		oldval = prof_active_set(*(bool *)newp);
1911 	} else
1912 		oldval = prof_active_get();
1913 	READ(oldval, bool);
1914 
1915 	ret = 0;
1916 label_return:
1917 	return (ret);
1918 }
1919 
1920 static int
1921 prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1922     void *newp, size_t newlen)
1923 {
1924 	int ret;
1925 	const char *filename = NULL;
1926 
1927 	if (!config_prof)
1928 		return (ENOENT);
1929 
1930 	WRITEONLY();
1931 	WRITE(filename, const char *);
1932 
1933 	if (prof_mdump(filename)) {
1934 		ret = EFAULT;
1935 		goto label_return;
1936 	}
1937 
1938 	ret = 0;
1939 label_return:
1940 	return (ret);
1941 }
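
/*
 * Example (editor's sketch, not from the original source): "prof.dump" is
 * write-only; omitting the new value (or passing a NULL filename) falls
 * back to the opt.prof_prefix-derived default name:
 *
 *	const char *fname = "./jeprof.out";
 *	mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));
 */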
1942 
1943 static int
1944 prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1945     void *newp, size_t newlen)
1946 {
1947 	int ret;
1948 	bool oldval;
1949 
1950 	if (!config_prof)
1951 		return (ENOENT);
1952 
1953 	if (newp != NULL) {
1954 		if (newlen != sizeof(bool)) {
1955 			ret = EINVAL;
1956 			goto label_return;
1957 		}
1958 		oldval = prof_gdump_set(*(bool *)newp);
1959 	} else
1960 		oldval = prof_gdump_get();
1961 	READ(oldval, bool);
1962 
1963 	ret = 0;
1964 label_return:
1965 	return (ret);
1966 }
1967 
1968 static int
1969 prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1970     void *newp, size_t newlen)
1971 {
1972 	int ret;
1973 	size_t lg_sample = lg_prof_sample;
1974 	tsd_t *tsd;
1975 
1976 	if (!config_prof)
1977 		return (ENOENT);
1978 
1979 	WRITEONLY();
1980 	WRITE(lg_sample, size_t);
1981 	if (lg_sample >= (sizeof(uint64_t) << 3))
1982 		lg_sample = (sizeof(uint64_t) << 3) - 1;
1983 
1984 	tsd = tsd_fetch();
1985 
1986 	prof_reset(tsd, lg_sample);
1987 
1988 	ret = 0;
1989 label_return:
1990 	return (ret);
1991 }
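
/*
 * Example (editor's sketch, not from the original source): "prof.reset"
 * discards accumulated profile data and, when a new lg_prof_sample is
 * supplied, clamps it below 64 (one bit short of a uint64_t) as above:
 *
 *	size_t lg_sample = 19;
 *	mallctl("prof.reset", NULL, NULL, &lg_sample, sizeof(lg_sample));
 */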
1992 
1993 CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
1994 CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
1995 
1996 /******************************************************************************/
1997 
1998 CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
1999 CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
2000 CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
2001 CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
2002 CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
2003 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
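
/*
 * Example (editor's sketch, not from the original source): the stats
 * generators below report the snapshot captured at the last "epoch"
 * refresh, so readers typically bump the epoch first:
 *
 *	uint64_t epoch = 1;
 *	size_t usz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &usz, &epoch, sizeof(epoch));
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */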
2004 
2005 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
2006 CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
2007     ssize_t)
2008 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
2009 CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
2010 CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
2011 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
2012     ctl_stats.arenas[mib[2]].astats.mapped, size_t)
2013 CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
2014     ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
2015 CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
2016     ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
2017 CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
2018     ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
2019 CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
2020     ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
2021 CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
2022     ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
2023 
2024 CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
2025     ctl_stats.arenas[mib[2]].allocated_small, size_t)
2026 CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
2027     ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
2028 CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
2029     ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
2030 CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
2031     ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
2032 CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
2033     ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
2034 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
2035     ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
2036 CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
2037     ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
2038 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
2039     ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
2040 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
2041     ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
2042 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
2043     ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
2044 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
2045     ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
2046 CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
2047     ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
2048 
2049 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
2050     ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
2051 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
2052     ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
2053 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
2054     ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
2055 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
2056     ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
2057 CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
2058     ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
2059 CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
2060     ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
2061 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
2062     ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
2063 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
2064     ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
2065 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
2066     ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
2067 
2068 static const ctl_named_node_t *
2069 stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
2070 {
2071 
2072 	if (j > NBINS)
2073 		return (NULL);
2074 	return (super_stats_arenas_i_bins_j_node);
2075 }
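
/*
 * Example (editor's sketch, not from the original source): per-bin stats
 * carry two index components, the arena at mib[2] and the bin at mib[4];
 * arena_ind and bin_ind stand for the caller's chosen indices:
 *
 *	size_t mib[6], miblen = 6, sz;
 *	uint64_t nmalloc;
 *	mallctlnametomib("stats.arenas.0.bins.0.nmalloc", mib, &miblen);
 *	mib[2] = arena_ind;
 *	mib[4] = bin_ind;
 *	sz = sizeof(nmalloc);
 *	mallctlbymib(mib, miblen, &nmalloc, &sz, NULL, 0);
 */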
2076 
2077 CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
2078     ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
2079 CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
2080     ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
2081 CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
2082     ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
2083 CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
2084     ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
2085 
2086 static const ctl_named_node_t *
2087 stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
2088 {
2089 
2090 	if (j > nlclasses)
2091 		return (NULL);
2092 	return (super_stats_arenas_i_lruns_j_node);
2093 }
2094 
2095 CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
2096     ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
2097 CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
2098     ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
2099 CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
2100     ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
2101     uint64_t)
2102 CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
2103     ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
2104 
2105 static const ctl_named_node_t *
2106 stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
2107 {
2108 
2109 	if (j > nhclasses)
2110 		return (NULL);
2111 	return (super_stats_arenas_i_hchunks_j_node);
2112 }
2113 
2114 static const ctl_named_node_t *
2115 stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
2116 {
2117 	const ctl_named_node_t * ret;
2118 
2119 	malloc_mutex_lock(&ctl_mtx);
2120 	if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
2121 		ret = NULL;
2122 		goto label_return;
2123 	}
2124 
2125 	ret = super_stats_arenas_i_node;
2126 label_return:
2127 	malloc_mutex_unlock(&ctl_mtx);
2128 	return (ret);
2129 }
2130