1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import static org.apache.hadoop.hbase.util.HFileArchiveTestingUtil.assertArchiveEqualToOriginal;
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertFalse;
24  import static org.junit.Assert.assertTrue;
25  import static org.mockito.Mockito.doReturn;
26  import static org.mockito.Mockito.spy;
27  
28  import java.io.IOException;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.SortedMap;
32  import java.util.TreeMap;
33  
34  import org.apache.commons.logging.Log;
35  import org.apache.commons.logging.LogFactory;
36  import org.apache.hadoop.conf.Configuration;
37  import org.apache.hadoop.fs.FSDataOutputStream;
38  import org.apache.hadoop.fs.FileStatus;
39  import org.apache.hadoop.fs.FileSystem;
40  import org.apache.hadoop.fs.Path;
41  import org.apache.hadoop.hbase.CoordinatedStateManager;
42  import org.apache.hadoop.hbase.HBaseTestingUtility;
43  import org.apache.hadoop.hbase.HColumnDescriptor;
44  import org.apache.hadoop.hbase.HConstants;
45  import org.apache.hadoop.hbase.HRegionInfo;
46  import org.apache.hadoop.hbase.HTableDescriptor;
47  import org.apache.hadoop.hbase.MetaMockingUtil;
48  import org.apache.hadoop.hbase.NamespaceDescriptor;
49  import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
50  import org.apache.hadoop.hbase.Server;
51  import org.apache.hadoop.hbase.ServerName;
52  import org.apache.hadoop.hbase.testclassification.SmallTests;
53  import org.apache.hadoop.hbase.TableDescriptors;
54  import org.apache.hadoop.hbase.TableName;
55  import org.apache.hadoop.hbase.client.ClusterConnection;
56  import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
57  import org.apache.hadoop.hbase.client.Result;
58  import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
59  import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
60  import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
61  import org.apache.hadoop.hbase.executor.ExecutorService;
62  import org.apache.hadoop.hbase.io.Reference;
63  import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
64  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
65  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
66  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
67  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
68  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
69  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
70  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
71  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
72  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
73  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
74  import org.apache.hadoop.hbase.regionserver.HStore;
75  import org.apache.hadoop.hbase.util.Bytes;
76  import org.apache.hadoop.hbase.util.FSUtils;
77  import org.apache.hadoop.hbase.util.HFileArchiveUtil;
78  import org.apache.hadoop.hbase.util.Triple;
79  import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
80  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
81  import org.junit.Test;
82  import org.junit.experimental.categories.Category;
83  import org.mockito.Mockito;
84  import org.mockito.invocation.InvocationOnMock;
85  import org.mockito.stubbing.Answer;
86  
87  import com.google.protobuf.RpcController;
88  import com.google.protobuf.Service;
89  import com.google.protobuf.ServiceException;
90  
91  @Category(SmallTests.class)
92  public class TestCatalogJanitor {
93    private static final Log LOG = LogFactory.getLog(TestCatalogJanitor.class);
94  
95    /**
96     * Pseudo server for the tests below.
97     * Be sure to call stop() on the way out, else it could leave a mess behind.
98     */
99    class MockServer implements Server {
100     private final ClusterConnection connection;
101     private final Configuration c;
102 
103     MockServer(final HBaseTestingUtility htu)
104     throws NotAllMetaRegionsOnlineException, IOException, InterruptedException {
105       this.c = htu.getConfiguration();
106       ClientProtos.ClientService.BlockingInterface ri =
107         Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
108       MutateResponse.Builder builder = MutateResponse.newBuilder();
109       builder.setProcessed(true);
110       try {
111         Mockito.when(ri.mutate(
112           (RpcController)Mockito.any(), (MutateRequest)Mockito.any())).
113             thenReturn(builder.build());
114       } catch (ServiceException se) {
115         throw ProtobufUtil.getRemoteException(se);
116       }
117       try {
118         Mockito.when(ri.multi(
119           (RpcController)Mockito.any(), (MultiRequest)Mockito.any())).
120             thenAnswer(new Answer<MultiResponse>() {
121               @Override
122               public MultiResponse answer(InvocationOnMock invocation) throws Throwable {
123                 return buildMultiResponse( (MultiRequest)invocation.getArguments()[1]);
124               }
125             });
126       } catch (ServiceException se) {
127         throw ProtobufUtil.getRemoteException(se);
128       }
129       // Mock an HConnection and an AdminProtocol implementation.  Have the
130       // HConnection return the mocked client interface, and have that interface
131       // return the mocked-up responses above so our test can run.
132       this.connection =
133         HConnectionTestingUtility.getMockedConnectionAndDecorate(this.c,
134           Mockito.mock(AdminProtos.AdminService.BlockingInterface.class), ri,
135             ServerName.valueOf("example.org,12345,6789"),
136           HRegionInfo.FIRST_META_REGIONINFO);
137       // hbase.rootdir has already been pointed at the test dir by
138       // setRootDirAndCleanIt(); re-apply it so this mock's config stays consistent.
139       Path rootdir = FSUtils.getRootDir(this.c);
140       FSUtils.setRootDir(this.c, rootdir);
143     }
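        // Note: the stubbed mutate()/multi() calls above are what CatalogJanitor ends up using
        // (through this connection) when it edits hbase:meta while cleaning a parent region:
        // mutate() always reports the mutation as processed, and multi() echoes one result per
        // action via buildMultiResponse() at the bottom of this test class.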
144 
145     @Override
146     public ClusterConnection getConnection() {
147       return this.connection;
148     }
149 
150     @Override
151     public MetaTableLocator getMetaTableLocator() {
152       return null;
153     }
154 
155     @Override
156     public Configuration getConfiguration() {
157       return this.c;
158     }
159 
160     @Override
161     public ServerName getServerName() {
162       return ServerName.valueOf("mockserver.example.org", 1234, -1L);
163     }
164 
165     @Override
166     public ZooKeeperWatcher getZooKeeper() {
167       return null;
168     }
169 
170     @Override
171     public CoordinatedStateManager getCoordinatedStateManager() {
172       BaseCoordinatedStateManager m = Mockito.mock(BaseCoordinatedStateManager.class);
173       SplitLogManagerCoordination c = Mockito.mock(SplitLogManagerCoordination.class);
174       Mockito.when(m.getSplitLogManagerCoordination()).thenReturn(c);
175       SplitLogManagerDetails d = Mockito.mock(SplitLogManagerDetails.class);
176       Mockito.when(c.getDetails()).thenReturn(d);
177       return m;
178     }
179 
180     @Override
181     public void abort(String why, Throwable e) {
182       //no-op
183     }
184 
185     @Override
186     public boolean isAborted() {
187       return false;
188     }
189 
190     @Override
191     public boolean isStopped() {
192       return false;
193     }
194 
195     @Override
196     public void stop(String why) {
197     }
198   }
199 
200   /**
201    * Mock MasterServices for tests below.
202    */
203   class MockMasterServices implements MasterServices {
204     private final MasterFileSystem mfs;
205     private final AssignmentManager asm;
206 
207     MockMasterServices(final Server server) throws IOException {
208       this.mfs = new MasterFileSystem(server, this);
209       this.asm = Mockito.mock(AssignmentManager.class);
210     }
211 
212     @Override
213     public void checkTableModifiable(TableName tableName) throws IOException {
214       //no-op
215     }
216 
217     @Override
218     public void createTable(HTableDescriptor desc, byte[][] splitKeys)
219         throws IOException {
220       // no-op
221     }
222 
223     @Override
224     public AssignmentManager getAssignmentManager() {
225       return this.asm;
226     }
227 
228     @Override
229     public ExecutorService getExecutorService() {
230       return null;
231     }
232 
233     @Override
234     public MasterFileSystem getMasterFileSystem() {
235       return this.mfs;
236     }
237 
238     @Override
239     public MasterCoprocessorHost getMasterCoprocessorHost() {
240       return null;
241     }
242 
243     @Override
244     public ServerManager getServerManager() {
245       return null;
246     }
247 
248     @Override
249     public ZooKeeperWatcher getZooKeeper() {
250       return null;
251     }
252 
253     @Override
254     public CoordinatedStateManager getCoordinatedStateManager() {
255       return null;
256     }
257 
258     @Override
259     public MetaTableLocator getMetaTableLocator() {
260       return null;
261     }
262 
263     @Override
264     public ClusterConnection getConnection() {
265       return null;
266     }
267 
268     @Override
269     public Configuration getConfiguration() {
270       return mfs.conf;
271     }
272 
273     @Override
274     public ServerName getServerName() {
275       return null;
276     }
277 
278     @Override
279     public void abort(String why, Throwable e) {
280       //no-op
281     }
282 
283     @Override
284     public boolean isAborted() {
285       return false;
286     }
287 
288     private boolean stopped = false;
289 
290     @Override
291     public void stop(String why) {
292       stopped = true;
293     }
294 
295     @Override
296     public boolean isStopped() {
297       return stopped;
298     }
299 
300     @Override
301     public TableDescriptors getTableDescriptors() {
302       return new TableDescriptors() {
303         @Override
304         public HTableDescriptor remove(TableName tablename) throws IOException {
305           // TODO Auto-generated method stub
306           return null;
307         }
308 
309         @Override
310         public Map<String, HTableDescriptor> getAll() throws IOException {
311           // TODO Auto-generated method stub
312           return null;
313         }
314 
315         @Override
316         public HTableDescriptor get(TableName tablename)
317         throws IOException {
318           return createHTableDescriptor();
319         }
320 
321         @Override
322         public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
323           return null;
324         }
325 
326         @Override
327         public void add(HTableDescriptor htd) throws IOException {
328           // TODO Auto-generated method stub
329 
330         }
331         @Override
332         public void setCacheOn() throws IOException {
333         }
334 
335         @Override
336         public void setCacheOff() throws IOException {
337         }
338       };
339     }
340 
341     @Override
342     public boolean isServerShutdownHandlerEnabled() {
343       return true;
344     }
345 
346     @Override
347     public boolean registerService(Service instance) {
348       return false;
349     }
350 
351     @Override
352     public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
353       //To change body of implemented methods use File | Settings | File Templates.
354     }
355 
356     @Override
357     public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
358       //To change body of implemented methods use File | Settings | File Templates.
359     }
360 
361     @Override
362     public void deleteNamespace(String name) throws IOException {
363       //To change body of implemented methods use File | Settings | File Templates.
364     }
365 
366     @Override
367     public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
368       return null;  //To change body of implemented methods use File | Settings | File Templates.
369     }
370 
371     @Override
372     public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
373       return null;  //To change body of implemented methods use File | Settings | File Templates.
374     }
375 
376     @Override
377     public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
378       return null;  //To change body of implemented methods use File | Settings | File Templates.
379     }
380 
381     @Override
382     public List<TableName> listTableNamesByNamespace(String name) throws IOException {
383       return null;
384     }
385 
386     @Override
387     public void deleteTable(TableName tableName) throws IOException { }
388 
389     @Override
390     public void truncateTable(TableName tableName, boolean preserveSplits) throws IOException { }
391 
392 
393     @Override
394     public void modifyTable(TableName tableName, HTableDescriptor descriptor)
395         throws IOException { }
396 
397     @Override
398     public void enableTable(TableName tableName) throws IOException { }
399 
400     @Override
401     public void disableTable(TableName tableName) throws IOException { }
402 
403     @Override
404     public void addColumn(TableName tableName, HColumnDescriptor column)
405         throws IOException { }
406 
407     @Override
408     public void modifyColumn(TableName tableName, HColumnDescriptor descriptor)
409         throws IOException { }
410 
411     @Override
412     public void deleteColumn(TableName tableName, byte[] columnName)
413         throws IOException { }
414 
415     @Override
416     public TableLockManager getTableLockManager() {
417       return null;
418     }
419 
420     @Override
421     public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
422         boolean forcible) throws IOException {
423     }
424 
425     @Override
426     public boolean isInitialized() {
427       // Auto-generated method stub
428       return false;
429     }
430   }
431 
432   @Test
433   public void testCleanParent() throws IOException, InterruptedException {
434     HBaseTestingUtility htu = new HBaseTestingUtility();
435     setRootDirAndCleanIt(htu, "testCleanParent");
436     Server server = new MockServer(htu);
437     try {
438       MasterServices services = new MockMasterServices(server);
439       CatalogJanitor janitor = new CatalogJanitor(server, services);
440       // Create regions.
441       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
442       htd.addFamily(new HColumnDescriptor("f"));
443       HRegionInfo parent =
444         new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
445             Bytes.toBytes("eee"));
446       HRegionInfo splita =
447         new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
448             Bytes.toBytes("ccc"));
449       HRegionInfo splitb =
450         new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
451             Bytes.toBytes("eee"));
452       // Test that when both daughter regions are in place, we do not
453       // remove the parent.
454       Result r = createResult(parent, splita, splitb);
455       // Add a reference under splitA directory so we don't clear out the parent.
456       Path rootdir = services.getMasterFileSystem().getRootDir();
457       Path tabledir =
458         FSUtils.getTableDir(rootdir, htd.getTableName());
459       Path storedir = HStore.getStoreHomedir(tabledir, splita,
460           htd.getColumnFamilies()[0].getName());
461       Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
462       long now = System.currentTimeMillis();
463       // Reference name has this format: StoreFile#REF_NAME_PARSER
464       Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
465       FileSystem fs = services.getMasterFileSystem().getFileSystem();
466       Path path = ref.write(fs, p);
467       assertTrue(fs.exists(path));
468       assertFalse(janitor.cleanParent(parent, r));
469       // Remove the reference file and try again.
470       assertTrue(fs.delete(p, true));
471       assertTrue(janitor.cleanParent(parent, r));
472     } finally {
473       server.stop("shutdown");
474     }
475   }
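      // In short: cleanParent() refuses to remove the parent while a daughter still holds a
      // reference file under its store directory, and succeeds once that reference is gone.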
476 
477   /**
478    * Make sure the parent gets cleaned up even if a daughter is cleaned up before it.
479    * @throws IOException
480    * @throws InterruptedException
481    */
482   @Test
483   public void testParentCleanedEvenIfDaughterGoneFirst()
484   throws IOException, InterruptedException {
485     parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
486       "testParentCleanedEvenIfDaughterGoneFirst", Bytes.toBytes("eee"));
487   }
488 
489   /**
490    * Make sure the last parent with an empty end key gets cleaned up even if a daughter is cleaned up before it.
491    * @throws IOException
492    * @throws InterruptedException
493    */
494   @Test
495   public void testLastParentCleanedEvenIfDaughterGoneFirst()
496   throws IOException, InterruptedException {
497     parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
498       "testLastParentCleanedEvenIfDaughterGoneFirst", new byte[0]);
499   }
500 
501   /**
502    * Make sure a parent with the specified end key gets cleaned up even if a daughter is cleaned up before it.
503    *
504    * @param rootDir the test case name, used as the HBase testing utility root
505    * @param lastEndKey the end key of the split parent
506    * @throws IOException
507    * @throws InterruptedException
508    */
509   private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
510   final String rootDir, final byte[] lastEndKey)
511   throws IOException, InterruptedException {
512     HBaseTestingUtility htu = new HBaseTestingUtility();
513     setRootDirAndCleanIt(htu, rootDir);
514     Server server = new MockServer(htu);
515     MasterServices services = new MockMasterServices(server);
516     CatalogJanitor janitor = new CatalogJanitor(server, services);
517     final HTableDescriptor htd = createHTableDescriptor();
518 
519     // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
520 
521     // Parent
522     HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
523       lastEndKey);
524     // Sleep a second, else the encoded names of these regions come out the
525     // same for regions with the same start key created in the same second.
526     Thread.sleep(1001);
527 
528     // Daughter a
529     HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
530       Bytes.toBytes("ccc"));
531     Thread.sleep(1001);
532     // Make daughters of daughter a; splitaa and splitab.
533     HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
534       Bytes.toBytes("bbb"));
535     HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"),
536       Bytes.toBytes("ccc"));
537 
538     // Daughter b
539     HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
540       lastEndKey);
541     Thread.sleep(1001);
542     // Make daughters of daughter b; splitba and splitbb.
543     HRegionInfo splitba = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
544       Bytes.toBytes("ddd"));
545     HRegionInfo splitbb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ddd"),
546       lastEndKey);
547 
548     // First test that our comparator up in CatalogJanitor sorts as expected.
549     // Just for kicks.
550     SortedMap<HRegionInfo, Result> regions =
551       new TreeMap<HRegionInfo, Result>(new CatalogJanitor.SplitParentFirstComparator());
552     // Now make sure that this region map sorts as we expect it to.
553     regions.put(parent, createResult(parent, splita, splitb));
554     regions.put(splitb, createResult(splitb, splitba, splitbb));
555     regions.put(splita, createResult(splita, splitaa, splitab));
556     // Assert it's properly sorted.
557     int index = 0;
558     for (Map.Entry<HRegionInfo, Result> e: regions.entrySet()) {
559       if (index == 0) {
560         assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
561       } else if (index == 1) {
562         assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
563       } else if (index == 2) {
564         assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
565       }
566       index++;
567     }
568 
569     // Now play around with the cleanParent function.  Create a ref from splita
570     // up to the parent.
571     Path splitaRef =
572       createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
573     // Make sure actual super parent sticks around because splita has a ref.
574     assertFalse(janitor.cleanParent(parent, regions.get(parent)));
575 
576     // splitba and splitbb do not have dirs in the fs.  That means that if
577     // we test splitb, it should get cleaned up.
578     assertTrue(janitor.cleanParent(splitb, regions.get(splitb)));
579 
580     // Now remove the ref from splita to the parent... so the parent can be let go
581     // and so the daughter splita can be split (it can't split while it still has
582     // references).  BUT make the timing such that the daughter gets cleaned up
583     // before we get a chance to let go of the parent.
584     FileSystem fs = FileSystem.get(htu.getConfiguration());
585     assertTrue(fs.delete(splitaRef, true));
586     // Create the refs from daughters of splita.
587     Path splitaaRef =
588       createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false);
589     Path splitabRef =
590       createReferences(services, htd, splita, splitab, Bytes.toBytes("bbb"), true);
591 
592     // Test splita.  It should stick around because of the references from splitab, etc.
593     assertFalse(janitor.cleanParent(splita, regions.get(splita)));
594 
595     // Now clean up the parent's daughter splita first: remove the references from its own daughters.
596     assertTrue(fs.delete(splitaaRef, true));
597     assertTrue(fs.delete(splitabRef, true));
598     assertTrue(janitor.cleanParent(splita, regions.get(splita)));
599 
600     // The super parent should get cleaned up now that both splita and splitb are gone.
601     assertTrue(janitor.cleanParent(parent, regions.get(parent)));
602 
603     services.stop("test finished");
604     janitor.join();
605   }
606 
607   /**
608    * CatalogJanitor.scan() should not clean parent regions if their own
609    * parents are still referencing them. This ensures that grandparent regions
610    * do not point to deleted parent regions.
611    */
612   @Test
613   public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
614     HBaseTestingUtility htu = new HBaseTestingUtility();
615     setRootDirAndCleanIt(htu, "testScanDoesNotCleanRegionsWithExistingParents");
616     Server server = new MockServer(htu);
617     MasterServices services = new MockMasterServices(server);
618 
619     final HTableDescriptor htd = createHTableDescriptor();
620 
621     // Create regions: aaa->{empty end key}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
622 
623     // Parent
624     HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
625       new byte[0], true);
626     // Sleep a second, else the encoded names of these regions come out the
627     // same for regions with the same start key created in the same second.
628     Thread.sleep(1001);
629 
630     // Daughter a
631     HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
632       Bytes.toBytes("ccc"), true);
633     Thread.sleep(1001);
634     // Make daughters of daughter a; splitaa and splitab.
635     HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
636       Bytes.toBytes("bbb"), false);
637     HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"),
638       Bytes.toBytes("ccc"), false);
639 
640     // Daughter b
641     HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
642         new byte[0]);
643     Thread.sleep(1001);
644 
645     final Map<HRegionInfo, Result> splitParents =
646         new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
647     splitParents.put(parent, createResult(parent, splita, splitb));
648     splita.setOffline(true); //simulate that splita goes offline when it is split
649     splitParents.put(splita, createResult(splita, splitaa,splitab));
650 
651     final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
652     CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
653     doReturn(new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
654             10, mergedRegions, splitParents)).when(janitor)
655         .getMergedRegionsAndSplitParents();
656 
657     //create ref from splita to parent
658     Path splitaRef =
659         createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
660 
661     // the parent and splita should not be removed
662     assertEquals(0, janitor.scan());
663 
664     //now delete the ref
665     FileSystem fs = FileSystem.get(htu.getConfiguration());
666     assertTrue(fs.delete(splitaRef, true));
667 
668     // now both the parent and splita can be deleted
669     assertEquals(2, janitor.scan());
670 
671     services.stop("test finished");
672     janitor.join();
673   }
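      // Note the Mockito spy above: getMergedRegionsAndSplitParents() is stubbed so the janitor
      // "scans" our fabricated split-parent map rather than a real hbase:meta table.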
674 
675   /**
676    * Test that SplitParentFirstComparator sorts split parents ahead of their daughters.
678    */
679   @Test
680   public void testSplitParentFirstComparator() {
681     SplitParentFirstComparator comp = new SplitParentFirstComparator();
682     final HTableDescriptor htd = createHTableDescriptor();
683 
684     /*  Region splits:
685      *
686      *  rootRegion --- firstRegion --- firstRegiona
687      *              |               |- firstRegionb
688      *              |
689      *              |- lastRegion --- lastRegiona  --- lastRegionaa
690      *                             |                |- lastRegionab
691      *                             |- lastRegionb
692      *
693      *  rootRegion   :   []  - []
694      *  firstRegion  :   []  - bbb
695      *  lastRegion   :   bbb - []
696      *  firstRegiona :   []  - aaa
697      *  firstRegionb :   aaa - bbb
698      *  lastRegiona  :   bbb - ddd
699      *  lastRegionb  :   ddd - []
700      */
701 
702     // root region
703     HRegionInfo rootRegion = new HRegionInfo(htd.getTableName(),
704       HConstants.EMPTY_START_ROW,
705       HConstants.EMPTY_END_ROW, true);
706     HRegionInfo firstRegion = new HRegionInfo(htd.getTableName(),
707       HConstants.EMPTY_START_ROW,
708       Bytes.toBytes("bbb"), true);
709     HRegionInfo lastRegion = new HRegionInfo(htd.getTableName(),
710       Bytes.toBytes("bbb"),
711       HConstants.EMPTY_END_ROW, true);
712 
713     assertTrue(comp.compare(rootRegion, rootRegion) == 0);
714     assertTrue(comp.compare(firstRegion, firstRegion) == 0);
715     assertTrue(comp.compare(lastRegion, lastRegion) == 0);
716     assertTrue(comp.compare(rootRegion, firstRegion) < 0);
717     assertTrue(comp.compare(rootRegion, lastRegion) < 0);
718     assertTrue(comp.compare(firstRegion, lastRegion) < 0);
719 
720     //first region split into a, b
721     HRegionInfo firstRegiona = new HRegionInfo(htd.getTableName(),
722       HConstants.EMPTY_START_ROW,
723       Bytes.toBytes("aaa"), true);
724     HRegionInfo firstRegionb = new HRegionInfo(htd.getTableName(),
725         Bytes.toBytes("aaa"),
726       Bytes.toBytes("bbb"), true);
727     //last region split into a, b
728     HRegionInfo lastRegiona = new HRegionInfo(htd.getTableName(),
729       Bytes.toBytes("bbb"),
730       Bytes.toBytes("ddd"), true);
731     HRegionInfo lastRegionb = new HRegionInfo(htd.getTableName(),
732       Bytes.toBytes("ddd"),
733       HConstants.EMPTY_END_ROW, true);
734 
735     assertTrue(comp.compare(firstRegiona, firstRegiona) == 0);
736     assertTrue(comp.compare(firstRegionb, firstRegionb) == 0);
737     assertTrue(comp.compare(rootRegion, firstRegiona) < 0);
738     assertTrue(comp.compare(rootRegion, firstRegionb) < 0);
739     assertTrue(comp.compare(firstRegion, firstRegiona) < 0);
740     assertTrue(comp.compare(firstRegion, firstRegionb) < 0);
741     assertTrue(comp.compare(firstRegiona, firstRegionb) < 0);
742 
743     assertTrue(comp.compare(lastRegiona, lastRegiona) == 0);
744     assertTrue(comp.compare(lastRegionb, lastRegionb) == 0);
745     assertTrue(comp.compare(rootRegion, lastRegiona) < 0);
746     assertTrue(comp.compare(rootRegion, lastRegionb) < 0);
747     assertTrue(comp.compare(lastRegion, lastRegiona) < 0);
748     assertTrue(comp.compare(lastRegion, lastRegionb) < 0);
749     assertTrue(comp.compare(lastRegiona, lastRegionb) < 0);
750 
751     assertTrue(comp.compare(firstRegiona, lastRegiona) < 0);
752     assertTrue(comp.compare(firstRegiona, lastRegionb) < 0);
753     assertTrue(comp.compare(firstRegionb, lastRegiona) < 0);
754     assertTrue(comp.compare(firstRegionb, lastRegionb) < 0);
755 
756     HRegionInfo lastRegionaa = new HRegionInfo(htd.getTableName(),
757       Bytes.toBytes("bbb"),
758       Bytes.toBytes("ccc"), false);
759     HRegionInfo lastRegionab = new HRegionInfo(htd.getTableName(),
760       Bytes.toBytes("ccc"),
761       Bytes.toBytes("ddd"), false);
762 
763     assertTrue(comp.compare(lastRegiona, lastRegionaa) < 0);
764     assertTrue(comp.compare(lastRegiona, lastRegionab) < 0);
765     assertTrue(comp.compare(lastRegionaa, lastRegionab) < 0);
766 
767   }
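      // The ordering asserted above is consistent with a comparator that sorts by start key
      // first and, for equal start keys, puts the region with the larger (or empty) end key
      // first, so a split parent always sorts ahead of its daughters.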
768 
769   @Test
770   public void testArchiveOldRegion() throws Exception {
771     String table = "table";
772     HBaseTestingUtility htu = new HBaseTestingUtility();
773     setRootDirAndCleanIt(htu, "testCleanParent");
774     Server server = new MockServer(htu);
775     MasterServices services = new MockMasterServices(server);
776 
777     // create the janitor
778     CatalogJanitor janitor = new CatalogJanitor(server, services);
779 
780     // Create regions.
781     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
782     htd.addFamily(new HColumnDescriptor("f"));
783     HRegionInfo parent = new HRegionInfo(htd.getTableName(),
784         Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
785     HRegionInfo splita = new HRegionInfo(htd.getTableName(),
786         Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
787     HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
788         Bytes.toBytes("ccc"),
789         Bytes.toBytes("eee"));
790 
791     // Test that when both daughter regions are in place, we do not
792     // remove the parent.
793     Result parentMetaRow = createResult(parent, splita, splitb);
794     FileSystem fs = FileSystem.get(htu.getConfiguration());
795     Path rootdir = services.getMasterFileSystem().getRootDir();
796     // have to set the root directory since we use it in HFileDisposer to figure out how to get
797     // to the archive directory. Otherwise, it just seems to pick the first root directory it can
798     // find (so the single test passes, but when the full suite is run, things get borked).
799     FSUtils.setRootDir(fs.getConf(), rootdir);
800     Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
801     Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
802     Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
803       tabledir, htd.getColumnFamilies()[0].getName());
804     LOG.debug("Table dir:" + tabledir);
805     LOG.debug("Store dir:" + storedir);
806     LOG.debug("Store archive dir:" + storeArchive);
807 
808     // add a couple of store files that we can check for
809     FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
810     // get the current store files for comparison
811     FileStatus[] storeFiles = fs.listStatus(storedir);
812     int index = 0;
813     for (FileStatus file : storeFiles) {
814       LOG.debug("Have store file:" + file.getPath());
815       assertEquals("Got unexpected store file", mockFiles[index].getPath(),
816         storeFiles[index].getPath());
817       index++;
818     }
819 
820     // do the cleaning of the parent
821     assertTrue(janitor.cleanParent(parent, parentMetaRow));
822     LOG.debug("Finished cleanup of parent region");
823 
824     // and now check to make sure that the files have actually been archived
825     FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
826     logFiles("original store files", storeFiles);
827     logFiles("archived files", archivedStoreFiles);
828 
829     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
830 
831     // cleanup
832     FSUtils.delete(fs, rootdir, true);
833     services.stop("Test finished");
834     server.stop("Test finished");
835     janitor.join();
836   }
837 
838   /**
839    * @param description description of the files for logging
840    * @param storeFiles the status of the files to log
841    */
842   private void logFiles(String description, FileStatus[] storeFiles) {
843     LOG.debug("Current " + description + ": ");
844     for (FileStatus file : storeFiles) {
845       LOG.debug(file.getPath());
846     }
847   }
848 
849   /**
850    * Test that adding a store file with the same name as one already archived causes the
851    * previously archived file to be moved aside to a timestamped backup.
852    */
853   @Test
854   public void testDuplicateHFileResolution() throws Exception {
855     String table = "table";
856     HBaseTestingUtility htu = new HBaseTestingUtility();
857     setRootDirAndCleanIt(htu, "testCleanParent");
858     Server server = new MockServer(htu);
859     MasterServices services = new MockMasterServices(server);
860 
861     // create the janitor
862 
863     CatalogJanitor janitor = new CatalogJanitor(server, services);
864 
865     // Create regions.
866     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
867     htd.addFamily(new HColumnDescriptor("f"));
868     HRegionInfo parent = new HRegionInfo(htd.getTableName(),
869         Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
870     HRegionInfo splita = new HRegionInfo(htd.getTableName(),
871         Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
872     HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
873         Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
874     // Test that when both daughter regions are in place, we do not
875     // remove the parent.
876     Result r = createResult(parent, splita, splitb);
877 
878     FileSystem fs = FileSystem.get(htu.getConfiguration());
879 
880     Path rootdir = services.getMasterFileSystem().getRootDir();
881     // have to set the root directory since we use it in HFileDisposer to figure out how to get
882     // to the archive directory. Otherwise, it just seems to pick the first root directory it can
883     // find (so the single test passes, but when the full suite is run, things get borked).
884     FSUtils.setRootDir(fs.getConf(), rootdir);
885     Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
886     Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
887     System.out.println("Old root:" + rootdir);
888     System.out.println("Old table:" + tabledir);
889     System.out.println("Old store:" + storedir);
890 
891     Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
892       tabledir, htd.getColumnFamilies()[0].getName());
893     System.out.println("Old archive:" + storeArchive);
894 
895     // add store files; make sure that they get archived when the parent is cleaned
896     addMockStoreFiles(2, services, storedir);
897     // get the current store files for comparison
898     FileStatus[] storeFiles = fs.listStatus(storedir);
899     // do the cleaning of the parent
900     assertTrue(janitor.cleanParent(parent, r));
901 
902     // and now check to make sure that the files have actually been archived
903     FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
904     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
905 
906     // now add store files with the same names as before to check that the
907     // already-archived copies get backed up when the new files are archived
908     addMockStoreFiles(2, services, storedir);
909 
910     // do the cleaning of the parent
911     assertTrue(janitor.cleanParent(parent, r));
912 
913     // and now check to make sure that the files have actually been archived
914     archivedStoreFiles = fs.listStatus(storeArchive);
915     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
916 
917     // cleanup
918     services.stop("Test finished");
919     server.stop("shutdown");
920     janitor.join();
921   }
922 
923   private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
924       throws IOException {
925     // get the filesystem and make sure the store directory exists
926     FileSystem fs = services.getMasterFileSystem().getFileSystem();
927     fs.mkdirs(storedir);
928     // create the store files in the parent
929     for (int i = 0; i < count; i++) {
930       Path storeFile = new Path(storedir, "_store" + i);
931       FSDataOutputStream dos = fs.create(storeFile, true);
932       dos.writeBytes("Some data: " + i);
933       dos.close();
934     }
935     LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
936     // make sure the mock store files are there
937     FileStatus[] storeFiles = fs.listStatus(storedir);
938     assertEquals("Didn't have expected store files", count, storeFiles.length);
939     return storeFiles;
940   }
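      // The mock "store files" written above are just small text files; that is enough here
      // because the archiving under test moves files by path and these tests never read them
      // back as real HFiles.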
941 
942   private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
943       final String subdir)
944   throws IOException {
945     Path testdir = htu.getDataTestDir(subdir);
946     FileSystem fs = FileSystem.get(htu.getConfiguration());
947     if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true));
948     FSUtils.setRootDir(htu.getConfiguration(), testdir);
949     return FSUtils.getRootDir(htu.getConfiguration()).toString();
950   }
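      // Each test points hbase.rootdir at its own scratch directory (deleting any leftovers)
      // so runs do not trip over each other's region dirs or previously archived files.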
951 
952   /**
953    * @param services Master services instance.
954    * @param htd Table descriptor; its first column family names the store dir.
955    * @param parent Parent region the reference points back to.
956    * @param daughter Daughter region under whose store dir the reference is written.
957    * @param midkey Split key recorded in the reference.
958    * @param top True if we are to write a 'top' reference.
959    * @return Path to reference we created.
960    * @throws IOException
961    */
962   private Path createReferences(final MasterServices services,
963       final HTableDescriptor htd, final HRegionInfo parent,
964       final HRegionInfo daughter, final byte [] midkey, final boolean top)
965   throws IOException {
966     Path rootdir = services.getMasterFileSystem().getRootDir();
967     Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
968     Path storedir = HStore.getStoreHomedir(tabledir, daughter,
969       htd.getColumnFamilies()[0].getName());
970     Reference ref =
971       top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
972     long now = System.currentTimeMillis();
973     // Reference name has this format: StoreFile#REF_NAME_PARSER
974     Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
975     FileSystem fs = services.getMasterFileSystem().getFileSystem();
976     ref.write(fs, p);
977     return p;
978   }
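      // For example, with the layout assumed by this test the reference lands at roughly
      //   <rootdir>/.../<table>/<daughterEncodedName>/<family>/<timestamp>.<parentEncodedName>
      // (the exact table-directory layout depends on the HBase version).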
979 
980   private Result createResult(final HRegionInfo parent, final HRegionInfo a,
981       final HRegionInfo b)
982   throws IOException {
983     return MetaMockingUtil.getMetaTableRowResult(parent, null, a, b);
984   }
985 
986   private HTableDescriptor createHTableDescriptor() {
987     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
988     htd.addFamily(new HColumnDescriptor("f"));
989     return htd;
990   }
991 
992   private MultiResponse buildMultiResponse(MultiRequest req) {
993     MultiResponse.Builder builder = MultiResponse.newBuilder();
994     RegionActionResult.Builder regionActionResultBuilder =
995         RegionActionResult.newBuilder();
996     ResultOrException.Builder roeBuilder = ResultOrException.newBuilder();
997     for (RegionAction regionAction: req.getRegionActionList()) {
998       regionActionResultBuilder.clear();
999       for (ClientProtos.Action action: regionAction.getActionList()) {
1000         roeBuilder.clear();
1001         roeBuilder.setResult(ClientProtos.Result.getDefaultInstance());
1002         roeBuilder.setIndex(action.getIndex());
1003         regionActionResultBuilder.addResultOrException(roeBuilder.build());
1004       }
1005       builder.addRegionActionResult(regionActionResultBuilder.build());
1006     }
1007     return builder.build();
1008   }
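       // CatalogJanitor's meta edits arrive at the mocked connection as a MultiRequest; returning
       // one ResultOrException per action (keyed to each action's index) is enough for the client
       // code to treat every mutation as having succeeded.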
1009 
1010 }
1011