/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package test.java.util.Map;

import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiConsumer;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.testng.Assert.assertEquals;

/*
 * @test
 * @bug 8023463
 * @summary Test the cases where a bin is treeified and where it is untreeified again
 * @run testng MapBinToFromTreeTest
 */

@Test
public class MapBinToFromTreeTest {

    // Initial capacity of map
    // Should be >= the map capacity for treeifying, see
    // HashMap/ConcurrentHashMap.MIN_TREEIFY_CAPACITY
    static final int INITIAL_CAPACITY = 64;

    // Maximum size of map
    // Should be > the treeify threshold, see HashMap/ConcurrentHashMap.TREEIFY_THRESHOLD
    // Should be > INITIAL_CAPACITY to ensure resize occurs
    static final int SIZE = 256;

    // Load factor of map
    // A value of 1.0 ensures that the resize threshold == capacity
    static final float LOAD_FACTOR = 1.0f;
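    // Note: with the OpenJDK defaults TREEIFY_THRESHOLD = 8 and
    // UNTREEIFY_THRESHOLD = 6, the 16-entry buckets produced by
    // HashCodeInteger.hash treeify during insertion and untreeify again once
    // resizing has split them down to 4 entries each.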

    @DataProvider(name = "maps")
    static Object[][] mapProvider() {
        return new Object[][] {
                // Pass in the class name as a description for test reporting
                // purposes
                { HashMap.class.getName(), new HashMap<>(INITIAL_CAPACITY, LOAD_FACTOR) },
                { LinkedHashMap.class.getName(), new LinkedHashMap<>(INITIAL_CAPACITY, LOAD_FACTOR) },
                { ConcurrentHashMap.class.getName(), new ConcurrentHashMap<>(INITIAL_CAPACITY, LOAD_FACTOR) },
        };
    }

    @Test(dataProvider = "maps")
    public void testPutThenGet(String d, Map<HashCodeInteger, Integer> m) {
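        // After each insertion, verify that every key added so far can still be
        // looked up, covering lookups in both list and tree bins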
        put(SIZE, m, (i, s) -> {
            for (int j = 0; j < s; j++) {
                assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
                        String.format("Map.get(%d)", j));
            }
        });
    }

    @Test(dataProvider = "maps")
    public void testPutThenTraverse(String d, Map<HashCodeInteger, Integer> m) {
        Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);

        put(SIZE, m, (i, s) -> {
            // Note that it is OK to collect to a Set (HashSet) as long as
            // integer values are used, since these tests only check for
            // collisions; other tests verify more general functionality
            Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
            Collection<Integer> expected = IntStream.range(0, s).boxed().collect(c);
            assertEquals(actual, expected, "Map.keySet()");
        });
    }

    @Test(dataProvider = "maps")
    public void testRemoveThenGet(String d, Map<HashCodeInteger, Integer> m) {
        put(SIZE, m, (i, s) -> { });

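        // Remove keys one at a time; after each removal every remaining key must
        // still map to its value as bins shrink and untreeify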
        remove(m, (i, s) -> {
            for (int j = i + 1; j < SIZE; j++) {
                assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
                        String.format("Map.get(%d)", j));
            }
        });
    }

    @Test(dataProvider = "maps")
    public void testRemoveThenTraverse(String d, Map<HashCodeInteger, Integer> m) {
        put(SIZE, m, (i, s) -> { });

        Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);

        remove(m, (i, s) -> {
            Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
            Collection<Integer> expected = IntStream.range(i + 1, SIZE).boxed().collect(c);
            assertEquals(actual, expected, "Map.keySet()");
        });
    }

    @Test(dataProvider = "maps")
    public void testUntreeifyOnResizeWithGet(String d, Map<HashCodeInteger, Integer> m) {
        // Fill the map with 64 entries grouped into 4 buckets
        put(INITIAL_CAPACITY, m, (i, s) -> { });

        for (int i = INITIAL_CAPACITY; i < SIZE; i++) {
            // Add further entries to the 0th bucket so as not to disturb the
            // other buckets, whose entries may be redistributed and/or
            // untreeified on resize
            m.put(new HashCodeInteger(i, 0), i);

            for (int j = 0; j < INITIAL_CAPACITY; j++) {
                assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
                        String.format("Map.get(%d) < INITIAL_CAPACITY", j));
            }
            for (int j = INITIAL_CAPACITY; j <= i; j++) {
                assertEquals(m.get(new HashCodeInteger(j, 0)).intValue(), j,
                        String.format("Map.get(%d) >= INITIAL_CAPACITY", j));
            }
        }
    }

    @Test(dataProvider = "maps")
    public void testUntreeifyOnResizeWithTraverse(String d, Map<HashCodeInteger, Integer> m) {
        // Fill the map with 64 entries grouped into 4 buckets
        put(INITIAL_CAPACITY, m, (i, s) -> { });

        Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);

        for (int i = INITIAL_CAPACITY; i < SIZE; i++) {
            // Add further entries to the 0th bucket so as not to disturb the
            // other buckets, whose entries may be redistributed and/or
            // untreeified on resize
            m.put(new HashCodeInteger(i, 0), i);

            Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
            Collection<Integer> expected = IntStream.rangeClosed(0, i).boxed().collect(c);
            assertEquals(actual, expected, "Key set");
        }
    }

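    // LinkedHashMap guarantees insertion-order iteration, so its contents are
    // collected into a List; HashMap and ConcurrentHashMap make no ordering
    // guarantee, so a Set is used for them.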
    Collector<Integer, ?, ? extends Collection<Integer>> getCollector(Map<?, ?> m) {
        Collector<Integer, ?, ? extends Collection<Integer>> collector = m instanceof LinkedHashMap
                ? Collectors.toList()
                : Collectors.toSet();
        return collector;
    }

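    // Inserts keys 0..size-1 and, after each put, invokes the callback with the
    // key just inserted and the current map size.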
    void put(int size, Map<HashCodeInteger, Integer> m, BiConsumer<Integer, Integer> c) {
        for (int i = 0; i < size; i++) {
            m.put(new HashCodeInteger(i), i);

            c.accept(i, m.size());
        }
    }

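    // Removes keys 0..size-1 in order and, after each removal, invokes the
    // callback with the key just removed and the current map size.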
    void remove(Map<HashCodeInteger, Integer> m, BiConsumer<Integer, Integer> c) {
        int size = m.size();
        // Remove all elements, thus ensuring that at some point the trees will be
        // converted back into plain bins
        for (int i = 0; i < size; i++) {
            m.remove(new HashCodeInteger(i));

            c.accept(i, m.size());
        }
    }

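    // A key whose hash code can be chosen independently of its value, letting the
    // tests steer keys into particular buckets. It is Comparable so that tree bins
    // can order keys whose hash codes collide.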
    static final class HashCodeInteger implements Comparable<HashCodeInteger> {
        final int value;

        final int hashcode;

        HashCodeInteger(int value) {
            this(value, hash(value));
        }

        HashCodeInteger(int value, int hashcode) {
            this.value = value;
            this.hashcode = hashcode;
        }

        static int hash(int i) {
            // Assuming 64 entries with keys from 0 to 63, a map:
            // - of capacity 64 will have 4 buckets with 16 entries per bucket
            // - of capacity 128 will have 8 buckets with 8 entries per bucket
            // - of capacity 256 will have 16 buckets with 4 entries per bucket
            //
            // Resizing redistributes the entries, doubling the number of buckets
            // and halving the entries per bucket. This results in untreeification
            // when the number of entries in a bucket falls to or below the
            // untreeify threshold (see HashMap/ConcurrentHashMap.UNTREEIFY_THRESHOLD)
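            //
            // Worked example (bucket index assumes the table's power-of-two
            // masking; for these small values HashMap's hash spreading,
            // h ^ (h >>> 16), leaves the hash unchanged):
            //   hash(0) = 0, hash(4) = 64, hash(8) = 128 -> bucket 0 at capacity 64
            //   hash(1) = 1, hash(5) = 65, hash(9) = 129 -> bucket 1 at capacity 64
            //   at capacity 128, hash(4) = 64 maps to bucket 64, so each bucket halves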
            return (i % 4) + (i / 4) * INITIAL_CAPACITY;
        }

        @Override
        public boolean equals(Object obj) {
            if (obj instanceof HashCodeInteger) {
                HashCodeInteger other = (HashCodeInteger) obj;
                return other.value == value;
            }
            return false;
        }

        @Override
        public int hashCode() {
            return hashcode;
        }

        @Override
        public int compareTo(HashCodeInteger o) {
            return value - o.value;
        }

        @Override
        public String toString() {
            return Integer.toString(value);
        }
    }
}