/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coordination.ZkSplitLogWorkerCoordination;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(LargeTests.class)
public class TestZooKeeper {
  private final Log LOG = LogFactory.getLog(this.getClass());

  private final static HBaseTestingUtility
      TEST_UTIL = new HBaseTestingUtility();

  /**
   * Start the mini DFS and ZooKeeper clusters once for the class and configure the master to use
   * the mock load balancer.
   * @throws java.lang.Exception
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Test we can first start the ZK cluster by itself
    Configuration conf = TEST_UTIL.getConfiguration();
    TEST_UTIL.startMiniDFSCluster(2);
    TEST_UTIL.startMiniZKCluster();
    conf.setBoolean("dfs.support.append", true);
    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
    conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, MockLoadBalancer.class,
        LoadBalancer.class);
  }

  /**
   * Shut down the whole mini cluster.
   * @throws java.lang.Exception
   */
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Start a fresh mini HBase cluster (two masters, two region servers) before each test.
   * @throws java.lang.Exception
   */
  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniHBaseCluster(2, 2);
  }

  @After
  public void after() throws Exception {
    try {
      // Some regionserver could fail to delete its znode,
      // so shutdown could hang. Let's kill them all instead.
      TEST_UTIL.getHBaseCluster().killAll();

      // Still need to clean things up
      TEST_UTIL.shutdownMiniHBaseCluster();
    } finally {
      TEST_UTIL.getTestFileSystem().delete(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), true);
      ZKUtil.deleteNodeRecursively(TEST_UTIL.getZooKeeperWatcher(), "/hbase");
    }
  }

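  /**
   * Reflectively fetch the connection's keep-alive ZooKeeperWatcher. The getter is not part of
   * the public HConnection interface, so we look it up on the implementation class and make it
   * accessible before invoking it.
   */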
  private ZooKeeperWatcher getZooKeeperWatcher(HConnection c)
  throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
    Method getterZK = c.getClass().getDeclaredMethod("getKeepAliveZooKeeperWatcher");
    getterZK.setAccessible(true);
    return (ZooKeeperWatcher) getterZK.invoke(c);
  }

  /**
   * See HBASE-1232 and http://wiki.apache.org/hadoop/ZooKeeper/FAQ#4.
   * @throws IOException
   * @throws InterruptedException
   */
  // fails frequently, disabled for now, see HBASE-6406
  //@Test
  public void testClientSessionExpired() throws Exception {
    Configuration c = new Configuration(TEST_UTIL.getConfiguration());

    // We don't want to share the connection as we will check its state
    c.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "1111");

    HConnection connection = HConnectionManager.getConnection(c);

    ZooKeeperWatcher connectionZK = getZooKeeperWatcher(connection);
    LOG.info("ZooKeeperWatcher= 0x" + Integer.toHexString(
      connectionZK.hashCode()));
    LOG.info("getRecoverableZooKeeper= 0x" + Integer.toHexString(
      connectionZK.getRecoverableZooKeeper().hashCode()));
    LOG.info("session=" + Long.toHexString(
      connectionZK.getRecoverableZooKeeper().getSessionId()));

    TEST_UTIL.expireSession(connectionZK);

    LOG.info("Before using zkw state=" +
      connectionZK.getRecoverableZooKeeper().getState());
    // provoke session expiration by doing something with ZK
    try {
      connectionZK.getRecoverableZooKeeper().getZooKeeper().exists(
        "/1/1", false);
    } catch (KeeperException ignored) {
    }

    // Check that the old ZK connection is closed, which means we did expire
    States state = connectionZK.getRecoverableZooKeeper().getState();
    LOG.info("After using zkw state=" + state);
    LOG.info("session=" + Long.toHexString(
      connectionZK.getRecoverableZooKeeper().getSessionId()));

    // It's asynchronous, so we may have to wait a little...
    final long limit1 = System.currentTimeMillis() + 3000;
    while (System.currentTimeMillis() < limit1 && state != States.CLOSED) {
      state = connectionZK.getRecoverableZooKeeper().getState();
    }
    LOG.info("After using zkw loop=" + state);
    LOG.info("ZooKeeper should have timed out");
    LOG.info("session=" + Long.toHexString(
      connectionZK.getRecoverableZooKeeper().getSessionId()));

    // It's surprising, but sometimes we can still be in connected state.
    // As it's known (even if not understood) we don't make the test fail
    // for this reason.
    // Assert.assertTrue("state=" + state, state == States.CLOSED);

    // Check that the client recovered
    ZooKeeperWatcher newConnectionZK = getZooKeeperWatcher(connection);

    States state2 = newConnectionZK.getRecoverableZooKeeper().getState();
    LOG.info("After new get state=" + state2);

    // As it's an asynchronous event we may get the same ZKW if it's not
    // yet invalidated. Hence this loop.
    final long limit2 = System.currentTimeMillis() + 3000;
    while (System.currentTimeMillis() < limit2 &&
      state2 != States.CONNECTED && state2 != States.CONNECTING) {

      newConnectionZK = getZooKeeperWatcher(connection);
      state2 = newConnectionZK.getRecoverableZooKeeper().getState();
    }
    LOG.info("After new get state loop=" + state2);

    Assert.assertTrue(
      state2 == States.CONNECTED || state2 == States.CONNECTING);

    connection.close();
  }

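  /**
   * Expire a region server's ZK session and verify the cluster still serves basic operations
   * afterwards (see {@link #testSanity(String)}).
   */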
  @Test(timeout = 120000)
  public void testRegionServerSessionExpired() throws Exception {
    LOG.info("Starting testRegionServerSessionExpired");
    TEST_UTIL.expireRegionServerSession(0);
    testSanity("testRegionServerSessionExpired");
  }

  @Test(timeout = 300000)
  public void testMasterSessionExpired() throws Exception {
    LOG.info("Starting testMasterSessionExpired");
    TEST_UTIL.expireMasterSession();
    testSanity("testMasterSessionExpired");
  }

  /**
   * Master recovery when the znode already exists. Internally, this
   * test differs from {@link #testMasterSessionExpired} because here
   * the master znode will exist in ZK.
   */
  @Test(timeout = 300000)
  public void testMasterZKSessionRecoveryFailure() throws Exception {
    LOG.info("Starting testMasterZKSessionRecoveryFailure");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster m = cluster.getMaster();
    m.abort("Test recovery from zk session expired",
        new KeeperException.SessionExpiredException());
    assertTrue(m.isStopped()); // Master doesn't recover any more
    testSanity("testMasterZKSessionRecoveryFailure");
  }

  /**
   * Make sure we can use the cluster.
   * @throws Exception
   */
  private void testSanity(final String testName) throws Exception {
    String tableName = testName + "_" + System.currentTimeMillis();
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor family = new HColumnDescriptor("fam");
    desc.addFamily(family);
    LOG.info("Creating table " + tableName);
    Admin admin = TEST_UTIL.getHBaseAdmin();
    try {
      admin.createTable(desc);
    } finally {
      admin.close();
    }

    Table table =
      new HTable(new Configuration(TEST_UTIL.getConfiguration()), desc.getTableName());
    Put put = new Put(Bytes.toBytes("testrow"));
    put.add(Bytes.toBytes("fam"),
        Bytes.toBytes("col"), Bytes.toBytes("testdata"));
    LOG.info("Putting table " + tableName);
    table.put(put);
    table.close();
  }

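  /**
   * Open two connections whose ZK quorum strings differ (the test cluster's default quorum vs. an
   * explicit 127.0.0.1) and verify they end up with distinct ZooKeeperWatcher instances and
   * distinct quorums.
   */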
  @Test
  public void testMultipleZK()
  throws IOException, NoSuchMethodException, InvocationTargetException, IllegalAccessException {
    Table localMeta =
      new HTable(new Configuration(TEST_UTIL.getConfiguration()), TableName.META_TABLE_NAME);
    Configuration otherConf = new Configuration(TEST_UTIL.getConfiguration());
    otherConf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
    Table ipMeta = new HTable(otherConf, TableName.META_TABLE_NAME);

    // dummy, just to open the connection
    final byte [] row = new byte [] {'r'};
    localMeta.exists(new Get(row));
    ipMeta.exists(new Get(row));

    // make sure they aren't the same
    ZooKeeperWatcher z1 =
      getZooKeeperWatcher(HConnectionManager.getConnection(localMeta.getConfiguration()));
    ZooKeeperWatcher z2 =
      getZooKeeperWatcher(HConnectionManager.getConnection(otherConf));
    assertFalse(z1 == z2);
    assertFalse(z1.getQuorum().equals(z2.getQuorum()));

    localMeta.close();
    ipMeta.close();
  }

  /**
   * Create a znode with data.
   * @throws Exception
   */
  @Test
  public void testCreateWithParents() throws Exception {
    ZooKeeperWatcher zkw =
        new ZooKeeperWatcher(new Configuration(TEST_UTIL.getConfiguration()),
            TestZooKeeper.class.getName(), null);
    byte[] expectedData = new byte[] { 1, 2, 3 };
    ZKUtil.createWithParents(zkw, "/l1/l2/l3/l4/testCreateWithParents", expectedData);
    byte[] data = ZKUtil.getData(zkw, "/l1/l2/l3/l4/testCreateWithParents");
    assertTrue(Bytes.equals(expectedData, data));
    ZKUtil.deleteNodeRecursively(zkw, "/l1");

    ZKUtil.createWithParents(zkw, "/testCreateWithParents", expectedData);
    data = ZKUtil.getData(zkw, "/testCreateWithParents");
    assertTrue(Bytes.equals(expectedData, data));
    ZKUtil.deleteNodeRecursively(zkw, "/testCreateWithParents");
  }

  /**
   * Create a bunch of znodes in a hierarchy, try deleting one that has children (it will fail),
   * then delete it recursively, then delete the last znode.
   * @throws Exception
   */
  @Test
  public void testZNodeDeletes() throws Exception {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(
      new Configuration(TEST_UTIL.getConfiguration()),
      TestZooKeeper.class.getName(), null);
    ZKUtil.createWithParents(zkw, "/l1/l2/l3/l4");
    try {
      ZKUtil.deleteNode(zkw, "/l1/l2");
      fail("We should not be able to delete if znode has children");
    } catch (KeeperException ex) {
      assertNotNull(ZKUtil.getDataNoWatch(zkw, "/l1/l2/l3/l4", null));
    }
    ZKUtil.deleteNodeRecursively(zkw, "/l1/l2");
    // make sure it really is deleted
    assertNull(ZKUtil.getDataNoWatch(zkw, "/l1/l2/l3/l4", null));

    // do the same delete again and make sure it doesn't crash
    ZKUtil.deleteNodeRecursively(zkw, "/l1/l2");

    ZKUtil.deleteNode(zkw, "/l1");
    assertNull(ZKUtil.getDataNoWatch(zkw, "/l1/l2", null));
  }

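  /**
   * Round-trip cluster keys of the form ensemble:port:znode through ZKUtil, and check that a
   * malformed key is rejected.
   */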
  @Test
  public void testClusterKey() throws Exception {
    testKey("server", "2181", "hbase");
    testKey("server1,server2,server3", "2181", "hbase");
    try {
      ZKUtil.transformClusterKey("2181:hbase");
      fail("transformClusterKey should reject a key that is missing the ensemble part");
    } catch (IOException ex) {
      // OK
    }
  }

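  /**
   * Parse the given cluster key, apply it to a fresh Configuration, and make sure the quorum,
   * client port, and parent znode survive the round trip back into a key.
   */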
  private void testKey(String ensemble, String port, String znode)
      throws IOException {
    Configuration conf = new Configuration();
    String key = ensemble + ":" + port + ":" + znode;
    String[] parts = ZKUtil.transformClusterKey(key);
    assertEquals(ensemble, parts[0]);
    assertEquals(port, parts[1]);
    assertEquals(znode, parts[2]);
    ZKUtil.applyClusterKeyToConf(conf, key);
    assertEquals(parts[0], conf.get(HConstants.ZOOKEEPER_QUORUM));
    assertEquals(parts[1], conf.get(HConstants.ZOOKEEPER_CLIENT_PORT));
    assertEquals(parts[2], conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
    String reconstructedKey = ZKUtil.getZooKeeperClusterKey(conf);
    assertEquals(key, reconstructedKey);
  }

  /**
   * A test for HBASE-3238
   * @throws IOException A connection attempt to zk failed
   * @throws InterruptedException One of the non ZKUtil actions was interrupted
   * @throws KeeperException Any of the zookeeper connections had a
   * KeeperException
   */
  @Test
  public void testCreateSilentIsReallySilent() throws InterruptedException,
      KeeperException, IOException {
    Configuration c = TEST_UTIL.getConfiguration();

    String aclZnode = "/aclRoot";
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 5 * 1000; // 5 seconds
    ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance);
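    // Authenticate with digest credentials so this handle is the "creator" that CREATOR_ALL_ACL
    // grants access to for the znodes it creates below.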
    zk.addAuthInfo("digest", "hbase:rox".getBytes());

    // Assumes the root of the ZooKeeper space is writable as it creates a node
    // wherever the cluster home is defined.
    ZooKeeperWatcher zk2 = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
      "testCreateSilentIsReallySilent", null);

    // Save the previous ACL
    Stat s = null;
    List<ACL> oldACL = null;
    while (true) {
      try {
        s = new Stat();
        oldACL = zk.getACL("/", s);
        break;
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
          case SESSIONEXPIRED:
          case OPERATIONTIMEOUT:
            LOG.warn("Possibly transient ZooKeeper exception", e);
            Threads.sleep(100);
            break;
          default:
            throw e;
        }
      }
    }

    // I set this acl after the attempted creation of the cluster home node.
    // Add retries in case of retryable zk exceptions.
    while (true) {
      try {
        zk.setACL("/", ZooDefs.Ids.CREATOR_ALL_ACL, -1);
        break;
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
          case SESSIONEXPIRED:
          case OPERATIONTIMEOUT:
            LOG.warn("Possibly transient ZooKeeper exception: " + e);
            Threads.sleep(100);
            break;
          default:
            throw e;
        }
      }
    }

    while (true) {
      try {
        zk.create(aclZnode, null, ZooDefs.Ids.CREATOR_ALL_ACL, CreateMode.PERSISTENT);
        break;
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
          case SESSIONEXPIRED:
          case OPERATIONTIMEOUT:
            LOG.warn("Possibly transient ZooKeeper exception: " + e);
            Threads.sleep(100);
            break;
          default:
            throw e;
        }
      }
    }
    zk.close();
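    // The actual check for HBASE-3238: createAndFailSilent runs through a watcher that lacks the
    // digest auth and hits the already existing, ACL-protected znode; the test passes as long as
    // nothing is thrown here.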
    ZKUtil.createAndFailSilent(zk2, aclZnode);

    // Restore the ACL
    ZooKeeper zk3 = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance);
    zk3.addAuthInfo("digest", "hbase:rox".getBytes());
    try {
      zk3.setACL("/", oldACL, -1);
    } finally {
      zk3.close();
    }
  }

  /**
   * Test that getChildDataAndWatchForNewChildren does not throw an NPE when it is invoked with a
   * node that does not exist.
   */
  @Test
  @SuppressWarnings("deprecation")
  public void testGetChildDataAndWatchForNewChildrenShouldNotThrowNPE()
      throws Exception {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "testGetChildDataAndWatchForNewChildrenShouldNotThrowNPE", null);
    ZKUtil.getChildDataAndWatchForNewChildren(zkw, "/wrongNode");
  }

  /**
   * Tests that the master does not call retainAssignment after recovery from an expired zookeeper
   * session. Without the HBASE-6046 fix, the master always tries to assign all the user regions
   * by calling retainAssignment.
   */
  @Test(timeout = 300000)
  public void testRegionAssignmentAfterMasterRecoveryDueToZKExpiry() throws Exception {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    cluster.startRegionServer();
    cluster.waitForActiveAndReadyMaster(10000);
    HMaster m = cluster.getMaster();
    final ZooKeeperWatcher zkw = m.getZooKeeper();
    // Now the cluster is up, so assign some regions.
    try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
      byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"),
          Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
          Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j") };
      String tableName = "testRegionAssignmentAfterMasterRecoveryDueToZKExpiry";
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
      htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
      admin.createTable(htd, SPLIT_KEYS);
      TEST_UTIL.waitUntilNoRegionsInTransition(60000);
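      // Simulate a session expiry: drop the master's ZK connection and abort it with
      // SessionExpiredException so a new master has to take over.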
      m.getZooKeeper().close();
      MockLoadBalancer.retainAssignCalled = false;
      final int expectedNumOfListeners = countPermanentListeners(zkw);
      m.abort("Test recovery from zk session expired",
          new KeeperException.SessionExpiredException());
      assertTrue(m.isStopped()); // Master doesn't recover any more
      // The recovered master should not call retainAssignment, as it is not a
      // clean startup.
      assertFalse("Retain assignment should not be called", MockLoadBalancer.retainAssignCalled);
      // The number of listeners should be the same as before the master aborted.
      // Wait for the new master to be initialized.
      cluster.waitForActiveAndReadyMaster(120000);
      final HMaster newMaster = cluster.getMasterThread().getMaster();
      assertEquals(expectedNumOfListeners, countPermanentListeners(newMaster.getZooKeeper()));
    }
  }

  /**
   * Count listeners in zkw, excluding listeners that belong to workers or other temporary
   * processes.
   */
  private int countPermanentListeners(ZooKeeperWatcher watcher) {
    return countListeners(watcher, ZkSplitLogWorkerCoordination.class);
  }

  /**
   * Count listeners in zkw, excluding the provided classes.
   */
  private int countListeners(ZooKeeperWatcher watcher, Class<?>... exclude) {
    int cnt = 0;
    for (Object o : watcher.getListeners()) {
      boolean skip = false;
      for (Class<?> aClass : exclude) {
        if (aClass.isAssignableFrom(o.getClass())) {
          skip = true;
          break;
        }
      }
      if (!skip) {
        cnt += 1;
      }
    }
    return cnt;
  }

  /**
   * Tests whether the logs are split when the master recovers from an expired zookeeper session
   * and an RS goes down.
   */
  @Test(timeout = 300000)
  public void testLogSplittingAfterMasterRecoveryDueToZKExpiry() throws IOException,
      KeeperException, InterruptedException {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    cluster.startRegionServer();
    HMaster m = cluster.getMaster();
    // Now the cluster is up, so assign some regions.
    Admin admin = TEST_UTIL.getHBaseAdmin();
    Table table = null;
    try {
      byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("1"), Bytes.toBytes("2"),
        Bytes.toBytes("3"), Bytes.toBytes("4"), Bytes.toBytes("5") };

      String tableName = "testLogSplittingAfterMasterRecoveryDueToZKExpiry";
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
      HColumnDescriptor hcd = new HColumnDescriptor("col");
      htd.addFamily(hcd);
      admin.createTable(htd, SPLIT_KEYS);
      ZooKeeperWatcher zooKeeperWatcher = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
      ZKAssign.blockUntilNoRIT(zooKeeperWatcher);
      table = new HTable(TEST_UTIL.getConfiguration(), htd.getTableName());
      Put p;
      int numberOfPuts;
      for (numberOfPuts = 0; numberOfPuts < 6; numberOfPuts++) {
        p = new Put(Bytes.toBytes(numberOfPuts));
        p.add(Bytes.toBytes("col"), Bytes.toBytes("ql"), Bytes.toBytes("value" + numberOfPuts));
        table.put(p);
      }
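      // Kill the master's ZK session and then abort a region server; the newly elected master has
      // to split that server's WAL before the scan below can see all of the rows written above.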
      m.getZooKeeper().close();
      m.abort("Test recovery from zk session expired",
        new KeeperException.SessionExpiredException());
      assertTrue(m.isStopped()); // Master doesn't recover any more
      cluster.getRegionServer(0).abort("Aborting");
      // Without the patch for HBASE-6046 this test case will always time out;
      // with the patch it should pass.
      Scan scan = new Scan();
      int numberOfRows = 0;
      ResultScanner scanner = table.getScanner(scan);
      Result[] result = scanner.next(1);
      while (result != null && result.length > 0) {
        numberOfRows++;
        result = scanner.next(1);
      }
      assertEquals("Number of rows should be equal to number of puts.", numberOfPuts,
        numberOfRows);
    } finally {
      if (table != null) table.close();
      admin.close();
    }
  }

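  /**
   * Load balancer stub that records whether retainAssignment was invoked, so tests can verify
   * that a master recovering from ZK session expiry does not go through retain assignment.
   */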
  static class MockLoadBalancer extends SimpleLoadBalancer {
    static boolean retainAssignCalled = false;

    @Override
    public Map<ServerName, List<HRegionInfo>> retainAssignment(
        Map<HRegionInfo, ServerName> regions, List<ServerName> servers) {
      retainAssignCalled = true;
      return super.retainAssignment(regions, servers);
    }
  }

}