/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.util.StringUtils;

import com.google.common.base.Preconditions;

/**
 * Compact region on request and then run split if appropriate
 */
@InterfaceAudience.Private
public class CompactSplitThread implements CompactionRequestor {
  static final Log LOG = LogFactory.getLog(CompactSplitThread.class);

  private final HRegionServer server;
  private final Configuration conf;

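  // Each kind of request runs on its own fixed-size pool so that, for example, a backlog of
  // large compactions does not hold up splits, merges or small compactions.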
  private final ThreadPoolExecutor largeCompactions;
  private final ThreadPoolExecutor smallCompactions;
  private final ThreadPoolExecutor splits;
  private final ThreadPoolExecutor mergePool;

  /**
   * Splitting should not take place if the total number of regions exceeds this.
   * This is not a hard limit on the number of regions; it is a guideline to
   * stop splitting once the number of online regions is greater than this.
   */
  private int regionSplitLimit;

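  // Illustrative only: these are the pool-sizing properties the constructor below reads, with
  // the defaults that apply when they are not set. Shown as programmatic Configuration calls;
  // in a real deployment they would normally be set in hbase-site.xml instead.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   conf.setInt("hbase.regionserver.thread.compaction.large", 1);
  //   conf.setInt("hbase.regionserver.thread.compaction.small", 1);
  //   conf.setInt("hbase.regionserver.thread.split", 1);
  //   conf.setInt("hbase.regionserver.thread.merge", 1);
  //   conf.setInt("hbase.regionserver.regionSplitLimit", Integer.MAX_VALUE);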
  /**
   * @param server the region server this thread compacts, splits and merges regions for;
   *          pool sizes are read from its configuration
   */
  CompactSplitThread(HRegionServer server) {
    super();
    this.server = server;
    this.conf = server.getConfiguration();
    this.regionSplitLimit = conf.getInt("hbase.regionserver.regionSplitLimit",
        Integer.MAX_VALUE);

    int largeThreads = Math.max(1, conf.getInt(
        "hbase.regionserver.thread.compaction.large", 1));
    int smallThreads = conf.getInt(
        "hbase.regionserver.thread.compaction.small", 1);

    int splitThreads = conf.getInt("hbase.regionserver.thread.split", 1);

    // both compaction pools must be configured with at least one thread
    Preconditions.checkArgument(largeThreads > 0 && smallThreads > 0);

    final String n = Thread.currentThread().getName();

    this.largeCompactions = new ThreadPoolExecutor(largeThreads, largeThreads,
        60, TimeUnit.SECONDS, new PriorityBlockingQueue<Runnable>(),
        new ThreadFactory() {
          @Override
          public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(n + "-largeCompactions-" + System.currentTimeMillis());
            return t;
          }
      });
    this.largeCompactions.setRejectedExecutionHandler(new Rejection());
    this.smallCompactions = new ThreadPoolExecutor(smallThreads, smallThreads,
        60, TimeUnit.SECONDS, new PriorityBlockingQueue<Runnable>(),
        new ThreadFactory() {
          @Override
          public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(n + "-smallCompactions-" + System.currentTimeMillis());
            return t;
          }
      });
    this.smallCompactions.setRejectedExecutionHandler(new Rejection());
    this.splits = (ThreadPoolExecutor)
        Executors.newFixedThreadPool(splitThreads,
            new ThreadFactory() {
          @Override
          public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(n + "-splits-" + System.currentTimeMillis());
            return t;
          }
      });
    int mergeThreads = conf.getInt("hbase.regionserver.thread.merge", 1);
    this.mergePool = (ThreadPoolExecutor) Executors.newFixedThreadPool(
        mergeThreads, new ThreadFactory() {
          @Override
          public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(n + "-merges-" + System.currentTimeMillis());
            return t;
          }
        });
  }

  @Override
  public String toString() {
    return "compaction_queue=("
        + largeCompactions.getQueue().size() + ":"
        + smallCompactions.getQueue().size() + ")"
        + ", split_queue=" + splits.getQueue().size()
        + ", merge_queue=" + mergePool.getQueue().size();
  }

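  /**
   * Returns a human-readable dump of the contents of the compaction, split and region merge
   * queues, one queued task per line. Intended for debugging and status output.
   */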
  public String dumpQueue() {
    StringBuilder queueLists = new StringBuilder();
    queueLists.append("Compaction/Split Queue dump:\n");
    queueLists.append("  LargeCompaction Queue:\n");
    BlockingQueue<Runnable> lq = largeCompactions.getQueue();
    Iterator<Runnable> it = lq.iterator();
    while (it.hasNext()) {
      queueLists.append("    " + it.next().toString());
      queueLists.append("\n");
    }

    if (smallCompactions != null) {
      queueLists.append("\n");
      queueLists.append("  SmallCompaction Queue:\n");
      lq = smallCompactions.getQueue();
      it = lq.iterator();
      while (it.hasNext()) {
        queueLists.append("    " + it.next().toString());
        queueLists.append("\n");
      }
    }

    queueLists.append("\n");
    queueLists.append("  Split Queue:\n");
    lq = splits.getQueue();
    it = lq.iterator();
    while (it.hasNext()) {
      queueLists.append("    " + it.next().toString());
      queueLists.append("\n");
    }

    queueLists.append("\n");
    queueLists.append("  Region Merge Queue:\n");
    lq = mergePool.getQueue();
    it = lq.iterator();
    while (it.hasNext()) {
      queueLists.append("    " + it.next().toString());
      queueLists.append("\n");
    }

    return queueLists.toString();
  }

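  /**
   * Asynchronously submits a request to merge regions <code>a</code> and <code>b</code> on the
   * merge pool. If the pool rejects the request (for example because it is shutting down), the
   * failure is only logged.
   */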
  public synchronized void requestRegionsMerge(final HRegion a,
      final HRegion b, final boolean forcible) {
    try {
      mergePool.execute(new RegionMergeRequest(a, b, this.server, forcible));
      if (LOG.isDebugEnabled()) {
        LOG.debug("Region merge requested for " + a + "," + b + ", forcible="
            + forcible + ".  " + this);
      }
    } catch (RejectedExecutionException ree) {
      LOG.warn("Could not execute merge for " + a + "," + b + ", forcible="
          + forcible, ree);
    }
  }

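  /**
   * Requests a split of the given region if the region split limit has not been reached, the
   * region is not blocked by too many store files, and the region reports a usable split point.
   * @return true if a split was requested
   */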
  public synchronized boolean requestSplit(final HRegion r) {
    // don't split regions that are blocking
    if (shouldSplitRegion() && r.getCompactPriority() >= Store.PRIORITY_USER) {
      byte[] midKey = r.checkSplit();
      if (midKey != null) {
        requestSplit(r, midKey);
        return true;
      }
    }
    return false;
  }

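  /**
   * Queues a split of the given region at <code>midKey</code> on the split pool. Does nothing
   * beyond a debug message when <code>midKey</code> is null.
   */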
  public synchronized void requestSplit(final HRegion r, byte[] midKey) {
    if (midKey == null) {
      LOG.debug("Region " + r.getRegionNameAsString() +
        " not splittable because midkey=null");
      return;
    }
    try {
      this.splits.execute(new SplitRequest(r, midKey, this.server));
      if (LOG.isDebugEnabled()) {
        LOG.debug("Split requested for " + r + ".  " + this);
      }
    } catch (RejectedExecutionException ree) {
      LOG.info("Could not execute split for " + r, ree);
    }
  }

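  // The requestCompaction() overloads below all funnel into requestCompactionInternal(); they
  // differ only in whether a priority and/or a pre-built list of per-store requests is supplied.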
  @Override
  public synchronized List<CompactionRequest> requestCompaction(final HRegion r, final String why)
      throws IOException {
    return requestCompaction(r, why, null);
  }

  @Override
  public synchronized List<CompactionRequest> requestCompaction(final HRegion r, final String why,
      List<Pair<CompactionRequest, Store>> requests) throws IOException {
    return requestCompaction(r, why, Store.NO_PRIORITY, requests);
  }

  @Override
  public synchronized CompactionRequest requestCompaction(final HRegion r, final Store s,
      final String why, CompactionRequest request) throws IOException {
    return requestCompaction(r, s, why, Store.NO_PRIORITY, request);
  }

  @Override
  public synchronized List<CompactionRequest> requestCompaction(final HRegion r, final String why,
      int p, List<Pair<CompactionRequest, Store>> requests) throws IOException {
    return requestCompactionInternal(r, why, p, requests, true);
  }

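  /**
   * Region-level entry point. When <code>requests</code> is null, a compaction is requested for
   * every store in the region; otherwise one request is issued per supplied (request, store)
   * pair. <code>selectNow</code> is false only for system compactions, whose file selection is
   * deferred until the request is actually run.
   */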
  private List<CompactionRequest> requestCompactionInternal(final HRegion r, final String why,
      int p, List<Pair<CompactionRequest, Store>> requests, boolean selectNow) throws IOException {
    // not a special compaction request, so make our own list
    List<CompactionRequest> ret = null;
    if (requests == null) {
      ret = selectNow ? new ArrayList<CompactionRequest>(r.getStores().size()) : null;
      for (Store s : r.getStores().values()) {
        CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow);
        if (selectNow) ret.add(cr);
      }
    } else {
      Preconditions.checkArgument(selectNow); // only system requests have selectNow == false
      ret = new ArrayList<CompactionRequest>(requests.size());
      for (Pair<CompactionRequest, Store> pair : requests) {
        ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst()));
      }
    }
    return ret;
  }

  public CompactionRequest requestCompaction(final HRegion r, final Store s,
      final String why, int priority, CompactionRequest request) throws IOException {
    return requestCompactionInternal(r, s, why, priority, request, true);
  }

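  /**
   * Requests a system compaction of every store in the region: it runs at
   * {@link Store#NO_PRIORITY} and defers file selection (selectNow == false) until the
   * compaction is actually executed.
   */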
  public synchronized void requestSystemCompaction(
      final HRegion r, final String why) throws IOException {
    requestCompactionInternal(r, why, Store.NO_PRIORITY, null, false);
  }

  public void requestSystemCompaction(
      final HRegion r, final Store s, final String why) throws IOException {
    requestCompactionInternal(r, s, why, Store.NO_PRIORITY, null, false);
  }

  /**
   * @param r HRegion the store belongs to
   * @param s Store to request compaction on
   * @param why Why compaction was requested -- used in debug messages
   * @param priority override the default priority (NO_PRIORITY == decide)
   * @param request custom compaction request. Can be <tt>null</tt>, in which case a simple
   *          compaction will be used.
   */
  private synchronized CompactionRequest requestCompactionInternal(final HRegion r, final Store s,
      final String why, int priority, CompactionRequest request, boolean selectNow)
          throws IOException {
    if (this.server.isStopped()
        || (r.getTableDesc() != null && !r.getTableDesc().isCompactionEnabled())) {
      return null;
    }

    CompactionContext compaction = null;
    if (selectNow) {
      compaction = selectCompaction(r, s, priority, request);
      if (compaction == null) return null; // message logged inside
    }

    // We assume that most compactions are small, so system compactions (no selection yet) go
    // into the small pool; selection happens there and the runner moves them to the large pool
    // if necessary. Requests that were selected up front are routed by their selected size.
    long size = selectNow ? compaction.getRequest().getSize() : 0;
    ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(size))
      ? largeCompactions : smallCompactions;
    pool.execute(new CompactionRunner(s, r, compaction, pool));
    if (LOG.isDebugEnabled()) {
      String type = (pool == smallCompactions) ? "Small " : "Large ";
      LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system")
          + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
    }
    return selectNow ? compaction.getRequest() : null;
  }

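  /**
   * Asks the store to select the files to compact for this request. Returns null (after a debug
   * message) if the selection was cancelled; otherwise applies any priority override and returns
   * the selected compaction context.
   */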
  private CompactionContext selectCompaction(final HRegion r, final Store s,
      int priority, CompactionRequest request) throws IOException {
    CompactionContext compaction = s.requestCompaction(priority, request);
    if (compaction == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Not compacting " + r.getRegionNameAsString() +
            " because compaction request was cancelled");
      }
      return null;
    }
    assert compaction.hasSelection();
    if (priority != Store.NO_PRIORITY) {
      compaction.getRequest().setPriority(priority);
    }
    return compaction;
  }

  /**
   * Only interrupt once it's done with a run through the work loop.
   */
  void interruptIfNecessary() {
    splits.shutdown();
    mergePool.shutdown();
    largeCompactions.shutdown();
    smallCompactions.shutdown();
  }

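  /**
   * Blocks until the given pool has terminated, calling
   * {@link ThreadPoolExecutor#shutdownNow()} after each 60 second wait that times out.
   */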
  private void waitFor(ThreadPoolExecutor t, String name) {
    boolean done = false;
    while (!done) {
      try {
        done = t.awaitTermination(60, TimeUnit.SECONDS);
        LOG.info("Waiting for " + name + " to finish...");
        if (!done) {
          t.shutdownNow();
        }
      } catch (InterruptedException ie) {
        LOG.warn("Interrupted waiting for " + name + " to finish...");
      }
    }
  }

  void join() {
    waitFor(splits, "Split Thread");
    waitFor(mergePool, "Merge Thread");
    waitFor(largeCompactions, "Large Compaction Thread");
    waitFor(smallCompactions, "Small Compaction Thread");
  }

  /**
   * Returns the combined number of compactions currently queued, across both the large and the
   * small compaction pools.
   *
   * @return The current compaction queue size.
   */
  public int getCompactionQueueSize() {
    return largeCompactions.getQueue().size() + smallCompactions.getQueue().size();
  }

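  /**
   * @return true while the number of online regions is still below
   *         <code>hbase.regionserver.regionSplitLimit</code>
   */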
  private boolean shouldSplitRegion() {
    return (regionSplitLimit > server.getNumberOfOnlineRegions());
  }

  /**
   * @return the regionSplitLimit
   */
  public int getRegionSplitLimit() {
    return this.regionSplitLimit;
  }

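  /**
   * Runnable that executes a single compaction. For system requests (no pre-selected files) it
   * performs file selection at run time, requeueing itself with an updated priority or into the
   * other pool when the selected size calls for throttling. Instances are {@link Comparable} so
   * that the compaction pools' {@link PriorityBlockingQueue}s order them by priority.
   */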
  private class CompactionRunner implements Runnable, Comparable<CompactionRunner> {
    private final Store store;
    private final HRegion region;
    private CompactionContext compaction;
    private int queuedPriority;
    private ThreadPoolExecutor parent;

    public CompactionRunner(Store store, HRegion region,
        CompactionContext compaction, ThreadPoolExecutor parent) {
      super();
      this.store = store;
      this.region = region;
      this.compaction = compaction;
      this.queuedPriority = (this.compaction == null)
          ? store.getCompactPriority() : compaction.getRequest().getPriority();
      this.parent = parent;
    }

    @Override
    public String toString() {
      return (this.compaction != null) ? ("Request = " + compaction.getRequest())
          : ("Store = " + store.toString() + ", pri = " + queuedPriority);
    }

    @Override
    public void run() {
      Preconditions.checkNotNull(server);
      if (server.isStopped()
          || (region.getTableDesc() != null && !region.getTableDesc().isCompactionEnabled())) {
        return;
      }
      // Common case - system compaction without a file selection. Select now.
      if (this.compaction == null) {
        int oldPriority = this.queuedPriority;
        this.queuedPriority = this.store.getCompactPriority();
        if (this.queuedPriority > oldPriority) {
          // Store priority decreased while we were in queue (due to some other compaction?),
          // requeue with new priority to avoid blocking potential higher priorities.
          this.parent.execute(this);
          return;
        }
        try {
          this.compaction = selectCompaction(this.region, this.store, queuedPriority, null);
        } catch (IOException ex) {
          LOG.error("Compaction selection failed " + this, ex);
          server.checkFileSystem();
          return;
        }
        if (this.compaction == null) return; // nothing to do
        // Now see if we are in correct pool for the size; if not, go to the correct one.
        // We might end up waiting for a while, so cancel the selection.
        assert this.compaction.hasSelection();
        ThreadPoolExecutor pool = store.throttleCompaction(
            compaction.getRequest().getSize()) ? largeCompactions : smallCompactions;
        if (this.parent != pool) {
          this.store.cancelRequestedCompaction(this.compaction);
          this.compaction = null;
          this.parent = pool;
          this.parent.execute(this);
          return;
        }
      }
      // Finally we can compact something.
      assert this.compaction != null;

      this.compaction.getRequest().beforeExecute();
      try {
        // Note: please don't put single-compaction logic here;
        //       put it into region/store/etc. This is CST logic.
        long start = EnvironmentEdgeManager.currentTimeMillis();
        boolean completed = region.compact(compaction, store);
        long now = EnvironmentEdgeManager.currentTimeMillis();
        LOG.info(((completed) ? "Completed" : "Aborted") + " compaction: " +
              this + "; duration=" + StringUtils.formatTimeDiff(now, start));
        if (completed) {
          // degenerate case: blocked regions require recursive enqueues
          if (store.getCompactPriority() <= 0) {
            requestSystemCompaction(region, store, "Recursive enqueue");
          } else {
            // see if the compaction has caused us to exceed max region size
            requestSplit(region);
          }
        }
      } catch (IOException ex) {
        IOException remoteEx = RemoteExceptionHandler.checkIOException(ex);
        LOG.error("Compaction failed " + this, remoteEx);
        if (remoteEx != ex) {
          LOG.info("Compaction failed at original callstack: " + formatStackTrace(ex));
        }
        server.checkFileSystem();
      } catch (Exception ex) {
        LOG.error("Compaction failed " + this, ex);
        server.checkFileSystem();
      } finally {
        LOG.debug("CompactSplitThread Status: " + CompactSplitThread.this);
      }
      this.compaction.getRequest().afterExecute();
    }

    private String formatStackTrace(Exception ex) {
      StringWriter sw = new StringWriter();
      PrintWriter pw = new PrintWriter(sw);
      ex.printStackTrace(pw);
      pw.flush();
      return sw.toString();
    }

    @Override
    public int compareTo(CompactionRunner o) {
      // Only compare the underlying request (if any), for queue sorting purposes.
      int compareVal = queuedPriority - o.queuedPriority; // compare priority
      if (compareVal != 0) return compareVal;
      CompactionContext tc = this.compaction, oc = o.compaction;
      // Sort pre-selected (user?) compactions before system ones with equal priority.
      return (tc == null) ? ((oc == null) ? 0 : 1)
          : ((oc == null) ? -1 : tc.getRequest().compareTo(oc.getRequest()));
    }
  }

  /**
   * Cleanup class to use when rejecting a compaction request from the queue.
   */
  private static class Rejection implements RejectedExecutionHandler {
    @Override
    public void rejectedExecution(Runnable runnable, ThreadPoolExecutor pool) {
      if (runnable instanceof CompactionRunner) {
        CompactionRunner runner = (CompactionRunner)runnable;
        LOG.debug("Compaction Rejected: " + runner);
        runner.store.cancelRequestedCompaction(runner.compaction);
      }
    }
  }
}