/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master.balancer;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Triple;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;

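/**
 * Tests {@link FavoredNodeAssignmentHelper}: round-robin placement of primary region
 * servers across racks, and placement of secondary/tertiary favored nodes under various
 * rack and server configurations.
 */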
@Category(SmallTests.class)
public class TestFavoredNodeAssignmentHelper {

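  // Shared fixture: mock servers, a rack -> servers map, and a mocked RackManager,
  // all populated once in setupBeforeClass().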
  private static List<ServerName> servers = new ArrayList<ServerName>();
  private static Map<String, List<ServerName>> rackToServers = new HashMap<String,
      List<ServerName>>();
  private static RackManager rackManager = Mockito.mock(RackManager.class);

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    // Set up some server -> rack mappings.
    // Create 40 servers; the first 30 are spread across three racks with 10 hosts each
    // (the remaining 10 are left without a rack mapping).
    for (int i = 0; i < 40; i++) {
      ServerName server = ServerName.valueOf("foo" + i + ":1234", -1);
      if (i < 10) {
        Mockito.when(rackManager.getRack(server)).thenReturn("rack1");
        if (rackToServers.get("rack1") == null) {
          List<ServerName> servers = new ArrayList<ServerName>();
          rackToServers.put("rack1", servers);
        }
        rackToServers.get("rack1").add(server);
      }
      if (i >= 10 && i < 20) {
        Mockito.when(rackManager.getRack(server)).thenReturn("rack2");
        if (rackToServers.get("rack2") == null) {
          List<ServerName> servers = new ArrayList<ServerName>();
          rackToServers.put("rack2", servers);
        }
        rackToServers.get("rack2").add(server);
      }
      if (i >= 20 && i < 30) {
        Mockito.when(rackManager.getRack(server)).thenReturn("rack3");
        if (rackToServers.get("rack3") == null) {
          List<ServerName> servers = new ArrayList<ServerName>();
          rackToServers.put("rack3", servers);
        }
        rackToServers.get("rack3").add(server);
      }
      servers.add(server);
    }
  }

  // The tests decide which racks to work with, and how many machines to
  // use from any given rack.
  // Returns the first 'count' servers from each rack named in the given map.
  private static List<ServerName> getServersFromRack(Map<String, Integer> rackToServerCount) {
    List<ServerName> chosenServers = new ArrayList<ServerName>();
    for (Map.Entry<String, Integer> entry : rackToServerCount.entrySet()) {
      List<ServerName> servers = rackToServers.get(entry.getKey());
      for (int i = 0; i < entry.getValue(); i++) {
        chosenServers.add(servers.get(i));
      }
    }
    return chosenServers;
  }

  @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
  public void testSmallCluster() {
    // Test the case where we cannot assign favored nodes (because the number
    // of nodes in the cluster is too small)
    Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
    rackToServerCount.put("rack1", 2);
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers,
        new Configuration());
    assertFalse(helper.canPlaceFavoredNodes());
  }

  @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
  public void testPlacePrimaryRSAsRoundRobin() {
    // Test the regular case where there are many servers in different racks
    // Test once for few regions and once for many regions
    primaryRSPlacement(6, null, 10, 10, 10);
    // now create lots of regions and try to place them on the limited number of machines
    primaryRSPlacement(600, null, 10, 10, 10);
  }

  @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
  public void testRoundRobinAssignmentsWithUnevenSizedRacks() {
    // In the case of uneven racks, the regions should be distributed
    // proportionately to the rack sizes
    primaryRSPlacement(6, null, 10, 10, 10);
    primaryRSPlacement(600, null, 10, 10, 5);
    primaryRSPlacement(600, null, 10, 5, 10);
    primaryRSPlacement(600, null, 5, 10, 10);
    primaryRSPlacement(500, null, 10, 10, 5);
    primaryRSPlacement(500, null, 10, 5, 10);
    primaryRSPlacement(500, null, 5, 10, 10);
    primaryRSPlacement(500, null, 9, 7, 8);
    primaryRSPlacement(500, null, 8, 7, 9);
    primaryRSPlacement(500, null, 7, 9, 8);
    primaryRSPlacement(459, null, 7, 9, 8);
  }

  @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
  public void testSecondaryAndTertiaryPlacementWithSingleRack() {
    // Test the case where there is a single rack and we need to choose
    // Primary/Secondary/Tertiary from a single rack.
    Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
    rackToServerCount.put("rack1", 10);
    // have lots of regions to test with
    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    // although we created lots of regions, there should be no overlap of the
    // primary/secondary/tertiary for any given region
    for (HRegionInfo region : regions) {
      ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
      assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
      assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
      assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
    }
  }

  @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
  public void testSecondaryAndTertiaryPlacementWithSingleServer() {
    // Test the case where we have a single node in the cluster. In this case
    // the primary can be assigned but the secondary/tertiary would be null
    Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
    rackToServerCount.put("rack1", 1);
    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();

    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    // no secondary/tertiary placement in case of a single RegionServer
    assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
  }

  @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
  public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
    // Test the case where we have multiple racks and the region servers
    // belong to multiple racks
    Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
    rackToServerCount.put("rack1", 10);
    rackToServerCount.put("rack2", 10);

    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();

    assertTrue(primaryRSMap.size() == 60000);
    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    assertTrue(secondaryAndTertiaryMap.size() == 60000);
    // for every region, the primary should be on one rack and the secondary/tertiary on
    // another (we create a lot of regions just to increase the chance of catching a violation)
    for (Map.Entry<HRegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
      ServerName[] allServersForRegion = entry.getValue();
      String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
      String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
      String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
      assertTrue(!primaryRSRack.equals(secondaryRSRack));
      assertTrue(secondaryRSRack.equals(tertiaryRSRack));
    }
  }

  @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
  public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
    // Test the case where we have two racks but with less than two servers in each
    // We will not have enough machines to select secondary/tertiary
    Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
    rackToServerCount.put("rack1", 1);
    rackToServerCount.put("rack2", 1);
    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
    assertTrue(primaryRSMap.size() == 6);
    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
          helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    for (HRegionInfo region : regions) {
      // not enough secondary/tertiary room to place the regions
      assertTrue(secondaryAndTertiaryMap.get(region) == null);
    }
  }

  @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
  public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
    // Test the case where there is only one server in one rack and another rack
    // has more servers. We try to choose secondary/tertiary on different
    // racks than what the primary is on. But if the other rack doesn't have
    // enough nodes to have both secondary/tertiary RSs, the tertiary is placed
    // on the same rack as the primary server is on
    Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
    rackToServerCount.put("rack1", 2);
    rackToServerCount.put("rack2", 1);
    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
    FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
    List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
    assertTrue(primaryRSMap.size() == 6);
    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
          helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    for (HRegionInfo region : regions) {
      ServerName s = primaryRSMap.get(region);
      ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
      ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
      if (rackManager.getRack(s).equals("rack1")) {
        assertTrue(rackManager.getRack(secondaryRS).equals("rack2") &&
            rackManager.getRack(tertiaryRS).equals("rack1"));
      }
      if (rackManager.getRack(s).equals("rack2")) {
        assertTrue(rackManager.getRack(secondaryRS).equals("rack1") &&
            rackManager.getRack(tertiaryRS).equals("rack1"));
      }
    }
  }

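  // Creates 'regionCount' regions, draws servers according to 'rackToServerCount', places
  // the primaries round-robin, and returns the primary map, the helper, and the region list.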
  private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
  secondaryAndTertiaryRSPlacementHelper(
      int regionCount, Map<String, Integer> rackToServerCount) {
    Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    Map<ServerName, List<HRegionInfo>> assignmentMap =
        new HashMap<ServerName, List<HRegionInfo>>();
    helper.initialize();
    // create regions
    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
    for (int i = 0; i < regionCount; i++) {
      HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"),
          Bytes.toBytes(i), Bytes.toBytes(i + 1));
      regions.add(region);
    }
    // place the regions
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
    return new Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
                   (primaryRSMap, helper, regions);
  }

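  // Places 'regionCount' regions over three racks of the given sizes and verifies that the
  // primaries end up spread across the racks in proportion to the rack sizes.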
  private void primaryRSPlacement(int regionCount, Map<HRegionInfo, ServerName> primaryRSMap,
      int firstRackSize, int secondRackSize, int thirdRackSize) {
    Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
    rackToServerCount.put("rack1", firstRackSize);
    rackToServerCount.put("rack2", secondRackSize);
    rackToServerCount.put("rack3", thirdRackSize);
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers,
        rackManager);
    helper.initialize();

    assertTrue(helper.canPlaceFavoredNodes());

    Map<ServerName, List<HRegionInfo>> assignmentMap =
        new HashMap<ServerName, List<HRegionInfo>>();
    if (primaryRSMap == null) primaryRSMap = new HashMap<HRegionInfo, ServerName>();
    // create some regions
    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
    for (int i = 0; i < regionCount; i++) {
      HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"),
          Bytes.toBytes(i), Bytes.toBytes(i + 1));
      regions.add(region);
    }
    // place those regions in primary RSs
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);

    // we should have all the regions nicely spread across the racks
    int regionsOnRack1 = 0;
    int regionsOnRack2 = 0;
    int regionsOnRack3 = 0;
    for (HRegionInfo region : regions) {
      if (rackManager.getRack(primaryRSMap.get(region)).equals("rack1")) {
        regionsOnRack1++;
      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack2")) {
        regionsOnRack2++;
      } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack3")) {
        regionsOnRack3++;
      }
    }
    // Verify that the regions got placed in the way we expect (documented in
    // FavoredNodeAssignmentHelper#placePrimaryRSAsRoundRobin)
    checkNumRegions(regionCount, firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
        regionsOnRack2, regionsOnRack3, assignmentMap);
  }

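  // Asserts that the per-rack region counts are ordered the same way as the rack sizes,
  // i.e. the smallest rack hosts the fewest regions and the largest rack hosts the most.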
  private void checkNumRegions(int regionCount, int firstRackSize, int secondRackSize,
      int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3,
      Map<ServerName, List<HRegionInfo>> assignmentMap) {
    // The regions should be distributed proportionately to the rack sizes.
    // Verify the ordering was as expected by inserting the rack sizes and region counts
    // into sorted maps: the keys are the rack sizes and region counts, and the values are
    // their relative positions.
    SortedMap<Integer, Integer> rackMap = new TreeMap<Integer, Integer>();
    rackMap.put(firstRackSize, 1);
    rackMap.put(secondRackSize, 2);
    rackMap.put(thirdRackSize, 3);
    SortedMap<Integer, Integer> regionMap = new TreeMap<Integer, Integer>();
    regionMap.put(regionsOnRack1, 1);
    regionMap.put(regionsOnRack2, 2);
    regionMap.put(regionsOnRack3, 3);
    // compare the boxed positions by value, not by reference
    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize,
        regionsOnRack1, regionsOnRack2, regionsOnRack3),
        rackMap.get(firstRackSize).intValue() == regionMap.get(regionsOnRack1).intValue());
    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize,
        regionsOnRack1, regionsOnRack2, regionsOnRack3),
        rackMap.get(secondRackSize).intValue() == regionMap.get(regionsOnRack2).intValue());
    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize,
        regionsOnRack1, regionsOnRack2, regionsOnRack3),
        rackMap.get(thirdRackSize).intValue() == regionMap.get(regionsOnRack3).intValue());
  }

  private String printProportions(int firstRackSize, int secondRackSize,
      int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) {
    return "Rack sizes: " + firstRackSize + " " + secondRackSize + " " + thirdRackSize
        + ", regions on racks: " + regionsOnRack1 + " " + regionsOnRack2 + " " + regionsOnRack3;
  }
}