#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"

/*
 * Only the most significant bits of keys passed to rtree_{read,write}() are
 * used.
 */
bool
rtree_new(rtree_t *rtree, bool zeroed) {
#ifdef JEMALLOC_JET
	if (!zeroed) {
		memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
	}
#else
	assert(zeroed);
#endif

	if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	return false;
}

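/*
 * Interior-node and leaf storage is obtained through the
 * rtree_{node,leaf}_{alloc,dalloc} hooks below.  The JET_MUTABLE qualifier
 * leaves the function pointers writable in JEMALLOC_JET (test) builds so that
 * tests can substitute their own implementations; otherwise they resolve to
 * the *_impl functions, which carve cacheline-aligned arrays out of the b0
 * base allocator and never free them.
 */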
static rtree_node_elm_t *
rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
	return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
	    sizeof(rtree_node_elm_t), CACHELINE);
}
rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;

static void
rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
	/* Nodes are never deleted during normal operation. */
	not_reached();
}
UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
    rtree_node_dalloc_impl;

static rtree_leaf_elm_t *
rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
	return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
	    sizeof(rtree_leaf_elm_t), CACHELINE);
}
rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;

static void
rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
	/* Leaves are never deleted during normal operation. */
	not_reached();
}
UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
    rtree_leaf_dalloc_impl;

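/*
 * Tree teardown exists only for JEMALLOC_JET builds; production builds never
 * destroy an rtree.  rtree_delete_subtree() recurses through the interior
 * levels and passes the last level's children to rtree_leaf_dalloc().  The
 * root is embedded in rtree_t itself (rtree_new() clears it via memset), so
 * it is never handed to rtree_node_dalloc().
 */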
#ifdef JEMALLOC_JET
#  if RTREE_HEIGHT > 1
static void
rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
    unsigned level) {
	size_t nchildren = ZU(1) << rtree_levels[level].bits;
	if (level + 2 < RTREE_HEIGHT) {
		for (size_t i = 0; i < nchildren; i++) {
			rtree_node_elm_t *node =
			    (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
			    ATOMIC_RELAXED);
			if (node != NULL) {
				rtree_delete_subtree(tsdn, rtree, node, level +
				    1);
			}
		}
	} else {
		for (size_t i = 0; i < nchildren; i++) {
			rtree_leaf_elm_t *leaf =
			    (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
			    ATOMIC_RELAXED);
			if (leaf != NULL) {
				rtree_leaf_dalloc(tsdn, rtree, leaf);
			}
		}
	}

	if (subtree != rtree->root) {
		rtree_node_dalloc(tsdn, rtree, subtree);
	}
}
#  endif

void
rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
#  if RTREE_HEIGHT > 1
	rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
#  endif
}
#endif

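/*
 * Lazy initialization of a child pointer.  Writers serialize on init_lock and
 * re-check *elmp under the lock, so at most one node/leaf array is allocated
 * per slot.  The new array is published with a release store, which pairs
 * with the acquire loads in rtree_child_{node,leaf}_tryread() for readers
 * that do not hold the lock.
 */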
static rtree_node_elm_t *
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
    atomic_p_t *elmp) {
	malloc_mutex_lock(tsdn, &rtree->init_lock);
	/*
	 * If *elmp is non-null, then it was initialized with the init lock
	 * held, so we can get by with 'relaxed' here.
	 */
	rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
	if (node == NULL) {
		node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
		    rtree_levels[level].bits);
		if (node == NULL) {
			malloc_mutex_unlock(tsdn, &rtree->init_lock);
			return NULL;
		}
		/*
		 * Even though we hold the lock, a later reader might not; we
		 * need release semantics.
		 */
		atomic_store_p(elmp, node, ATOMIC_RELEASE);
	}
	malloc_mutex_unlock(tsdn, &rtree->init_lock);

	return node;
}

static rtree_leaf_elm_t *
rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
	malloc_mutex_lock(tsdn, &rtree->init_lock);
	/*
	 * If *elmp is non-null, then it was initialized with the init lock
	 * held, so we can get by with 'relaxed' here.
	 */
	rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
	if (leaf == NULL) {
		leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
		    rtree_levels[RTREE_HEIGHT-1].bits);
		if (leaf == NULL) {
			malloc_mutex_unlock(tsdn, &rtree->init_lock);
			return NULL;
		}
		/*
		 * Even though we hold the lock, a later reader might not; we
		 * need release semantics.
		 */
		atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
	}
	malloc_mutex_unlock(tsdn, &rtree->init_lock);

	return leaf;
}

static bool
rtree_node_valid(rtree_node_elm_t *node) {
	return ((uintptr_t)node != (uintptr_t)0);
}

static bool
rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
	return ((uintptr_t)leaf != (uintptr_t)0);
}

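/*
 * Child readers come in "dependent" and non-dependent flavors.  A dependent
 * read is one where the caller already knows the mapping exists, so the child
 * pointer is asserted non-NULL and loaded relaxed; a non-dependent read uses
 * an acquire load, pairing with the release store in rtree_{node,leaf}_init(),
 * and may legitimately observe NULL for a subtree that has not been created
 * yet.  The *_read() variants additionally create the missing child on the
 * non-dependent path.
 */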
static rtree_node_elm_t *
rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
	rtree_node_elm_t *node;

	if (dependent) {
		node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
		    ATOMIC_RELAXED);
	} else {
		node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
		    ATOMIC_ACQUIRE);
	}

	assert(!dependent || node != NULL);
	return node;
}

static rtree_node_elm_t *
rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
    unsigned level, bool dependent) {
	rtree_node_elm_t *node;

	node = rtree_child_node_tryread(elm, dependent);
	if (!dependent && unlikely(!rtree_node_valid(node))) {
		node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
	}
	assert(!dependent || node != NULL);
	return node;
}

static rtree_leaf_elm_t *
rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
	rtree_leaf_elm_t *leaf;

	if (dependent) {
		leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
		    ATOMIC_RELAXED);
	} else {
		leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
		    ATOMIC_ACQUIRE);
	}

	assert(!dependent || leaf != NULL);
	return leaf;
}

static rtree_leaf_elm_t *
rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
    unsigned level, bool dependent) {
	rtree_leaf_elm_t *leaf;

	leaf = rtree_child_leaf_tryread(elm, dependent);
	if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
		leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
	}
	assert(!dependent || leaf != NULL);
	return leaf;
}

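/*
 * Slow path for leaf element lookup, reached only when the key misses both
 * the direct-mapped L1 cache and the L2 cache in rtree_ctx (the config_debug
 * block below asserts exactly that).  The walk starts at the root, descends
 * one level per RTREE_GET_CHILD, and finishes in RTREE_GET_LEAF, which also
 * refills the caches before returning the leaf element.
 */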
rtree_leaf_elm_t *
rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
	rtree_node_elm_t *node;
	rtree_leaf_elm_t *leaf;
#if RTREE_HEIGHT > 1
	node = rtree->root;
#else
	leaf = rtree->root;
#endif

	if (config_debug) {
		uintptr_t leafkey = rtree_leafkey(key);
		for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
			assert(rtree_ctx->cache[i].leafkey != leafkey);
		}
		for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
			assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
		}
	}

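	/*
	 * RTREE_GET_CHILD descends one interior level: it extracts this
	 * level's subkey and either reads the child (creating it when
	 * init_missing is true) or merely tries to read it.  With the ANDROID
	 * CHANGE inside the macro, an uninitialized child at any level makes
	 * the lookup return NULL rather than dereference a bad pointer, even
	 * for dependent lookups.
	 */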
#define RTREE_GET_CHILD(level) {					\
		assert(level < RTREE_HEIGHT-1);				\
		/* ANDROID CHANGE: Bad pointers return NULL */		\
		/* if (level != 0 && !dependent && */			\
		/*    unlikely(!rtree_node_valid(node))) { */		\
		if (unlikely(!rtree_node_valid(node))) {		\
		/* ANDROID END CHANGE */				\
			return NULL;					\
		}							\
		uintptr_t subkey = rtree_subkey(key, level);		\
		if (level + 2 < RTREE_HEIGHT) {				\
			node = init_missing ?				\
			    rtree_child_node_read(tsdn, rtree,		\
			    &node[subkey], level, dependent) :		\
			    rtree_child_node_tryread(&node[subkey],	\
			    dependent);					\
		} else {						\
			leaf = init_missing ?				\
			    rtree_child_leaf_read(tsdn, rtree,		\
			    &node[subkey], level, dependent) :		\
			    rtree_child_leaf_tryread(&node[subkey],	\
			    dependent);					\
		}							\
	}
	/*
	 * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
	 * (1) evict last entry in L2 cache; (2) move the collision slot from L1
	 * cache down to L2; and (3) fill L1.
	 */
#define RTREE_GET_LEAF(level) {						\
		assert(level == RTREE_HEIGHT-1);			\
		/* ANDROID CHANGE: Bad pointers return NULL */		\
		/* if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {*/	\
		if (unlikely(!rtree_leaf_valid(leaf))) {		\
		/* ANDROID END CHANGE */				\
			return NULL;					\
		}							\
		if (RTREE_CTX_NCACHE_L2 > 1) {				\
			memmove(&rtree_ctx->l2_cache[1],		\
			    &rtree_ctx->l2_cache[0],			\
			    sizeof(rtree_ctx_cache_elm_t) *		\
			    (RTREE_CTX_NCACHE_L2 - 1));			\
		}							\
		size_t slot = rtree_cache_direct_map(key);		\
		rtree_ctx->l2_cache[0].leafkey =			\
		    rtree_ctx->cache[slot].leafkey;			\
		rtree_ctx->l2_cache[0].leaf =				\
		    rtree_ctx->cache[slot].leaf;			\
		uintptr_t leafkey = rtree_leafkey(key);			\
		rtree_ctx->cache[slot].leafkey = leafkey;		\
		rtree_ctx->cache[slot].leaf = leaf;			\
		uintptr_t subkey = rtree_subkey(key, level);		\
		return &leaf[subkey];					\
	}
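	/*
	 * RTREE_HEIGHT is a compile-time constant, so only the branches below
	 * that apply to the configured height survive: the first two interior
	 * levels are unrolled explicitly, any remaining interior levels are
	 * handled by the loop, and RTREE_GET_LEAF always performs the final
	 * step.
	 */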
	if (RTREE_HEIGHT > 1) {
		RTREE_GET_CHILD(0)
	}
	if (RTREE_HEIGHT > 2) {
		RTREE_GET_CHILD(1)
	}
	if (RTREE_HEIGHT > 3) {
		for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
			RTREE_GET_CHILD(i)
		}
	}
	RTREE_GET_LEAF(RTREE_HEIGHT-1)
#undef RTREE_GET_CHILD
#undef RTREE_GET_LEAF
	not_reached();
}

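/*
 * Reset both cache levels of an rtree_ctx to the invalid leafkey sentinel so
 * that every slot misses until it is populated by
 * rtree_leaf_elm_lookup_hard().
 */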
void
rtree_ctx_data_init(rtree_ctx_t *ctx) {
	for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
		rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
		cache->leafkey = RTREE_LEAFKEY_INVALID;
		cache->leaf = NULL;
	}
	for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
		rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
		cache->leafkey = RTREE_LEAFKEY_INVALID;
		cache->leaf = NULL;
	}
}